Ejercicio 3: Modelos de Deep Learning#
Importar Librerías#
# Load libraries
import seaborn as sns
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import plotly.graph_objects as go
from sklearn.preprocessing import MinMaxScaler
# Load sklearn
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.model_selection import KFold
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import GridSearchCV
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import Lasso
from sklearn.linear_model import ElasticNet
from sklearn.tree import DecisionTreeRegressor
from sklearn.neighbors import KNeighborsRegressor
from sklearn.svm import SVR
from sklearn.pipeline import Pipeline
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.ensemble import AdaBoostRegressor
from sklearn.metrics import mean_squared_error
from sklearn.linear_model import LinearRegression
from statsmodels.tsa.stattools import adfuller
from statsmodels.stats.diagnostic import acorr_ljungbox
from statsmodels.stats.stattools import jarque_bera
from statsmodels.tsa.stattools import acf
from statsmodels.tsa.stattools import pacf
from statsmodels.graphics.tsaplots import plot_acf
from statsmodels.graphics.tsaplots import plot_pacf
from sklearn.metrics import r2_score
import statsmodels.api as sm
from sklearn.metrics import mean_absolute_error
import itertools
from statsmodels.tools.sm_exceptions import ConvergenceWarning
from statsmodels.tsa.holtwinters import ExponentialSmoothing
from statsmodels.tsa.holtwinters import SimpleExpSmoothing
from statsmodels.tsa.seasonal import seasonal_decompose
import statsmodels.tsa.api as smt
import matplotlib.gridspec as gridspec
from statsmodels.tsa.arima.model import ARIMA
import warnings
import pickle
import os
import plotly.graph_objects as go
from plotly.subplots import make_subplots
import numpy as np
import scipy.stats as stats
from statsmodels.tsa.stattools import acf
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import statsmodels.api as sm
from statsmodels.graphics.gofplots import qqplot
from arch import arch_model
#librerías para Perceptrón Multicapa
import sys
import tensorflow as tf
from tensorflow import keras
from keras.layers import Dense, Input, Dropout
from keras.optimizers import SGD
from keras.models import Model
from keras.models import load_model
from keras.callbacks import ModelCheckpoint
from tensorflow.keras.models import Sequential
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import KFold
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras import layers
from tensorflow.keras.wrappers.scikit_learn import KerasRegressor
#librerías para LSTM
from keras.layers import Dense, Input, Dropout
from keras.layers import LSTM
from keras.optimizers import SGD
from keras.models import Model
from keras.models import load_model
from keras.callbacks import ModelCheckpoint
#Omitir warnings
import warnings
import os
# Silence all Python warnings so the notebook output stays readable.
warnings.filterwarnings("ignore")
warnings.filterwarnings("ignore", category=UserWarning, message=".*SGD.*")
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # TensorFlow: suppress warnings, show only errors.
Cargar el dataset#
A continuación usamos la función read_csv() de la librería pandas para cargar los datos a nuestro entorno de trabajo.
df = pd.read_csv("https://raw.githubusercontent.com/lihkir/Data/refs/heads/main/Bitcoin%20Historical%20Data.csv", sep=",")
Procesamiento de Datos#
Procedemos a renombrar las variables que contienen caracteres especiales y/o espacios para evitar inconvenientes durante el procesamiento de los datos.
# Rename columns whose names contain special characters or spaces,
# so they can be used as plain attribute-style identifiers downstream.
df_1 = df.rename(columns={
'Vol.': 'Vol',
'Change %': 'Change'
})
df_1.info()
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 4999 entries, 0 to 4998
Data columns (total 7 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 Date 4999 non-null object
1 Price 4999 non-null object
2 Open 4999 non-null object
3 High 4999 non-null object
4 Low 4999 non-null object
5 Vol 4993 non-null object
6 Change 4999 non-null object
dtypes: object(7)
memory usage: 273.5+ KB
En este momento, procederemos a convertir el tipo de dato de la columna fecha a datetime y las columnas tipo object a float64.
# Parse the 'Date' strings into datetime64 values.
df_1['Date'] = pd.to_datetime(df_1['Date'])
df_1[['Price', 'Open', 'High', 'Low']] = df_1[['Price', 'Open', 'High', 'Low']].replace(',', '', regex=True).astype(float) # Remove thousands-separator commas from Price, Open, High, Low and convert to float
La columna Vol incluye letras que representan múltiplos; por lo tanto, a continuación, revisaremos cuántas de estas letras existen.
final_letters = df_1['Vol'].str.extract(r'([A-Za-z])$', expand=False) # Extract the trailing letter (unit suffix) of each 'Vol' entry
print(final_letters.dropna().unique())
['K' 'M' 'B']
De manera similar, la columna Change contiene un carácter especial (%). Procederemos a reemplazar cada múltiplo, eliminar el carácter % y convertir la columna Vol a tipo float y la columna Change a tipo float con la función con_vol_a_numerico().
def con_vol_a_numerico(Vol):
    """Convert a volume string like '1.2K', '3M' or '0.5B' to a float.

    Thousands-separator commas are stripped first. The suffixes K, M and B
    stand for 10^3, 10^6 and 10^9 respectively; a plain numeric string is
    converted directly. Non-string inputs (e.g. NaN) are returned unchanged.
    """
    if not isinstance(Vol, str):
        return Vol
    cleaned = Vol.replace(',', '')
    # Suffix -> multiplier lookup, checked in the same order as before.
    for suffix, factor in (('K', 1_000), ('M', 1_000_000), ('B', 1_000_000_000)):
        if suffix in cleaned:
            return float(cleaned.replace(suffix, '')) * factor
    return float(cleaned)
df_1['Vol'] = df_1['Vol'].apply(con_vol_a_numerico) # Convert suffixed volume strings to numeric values
df_1['Change'] = df_1['Change'].str.replace('%', '').astype(float) / 100 # '12.3%' string -> 0.123 fraction
df_1['Close'] = df_1['Open'] * (1 + df_1['Change']) # Derive closing price from open price and daily change
Imputación Datos Faltantes #
A continuación extraemos el número de datos faltantes y el porcentaje para cada característica del conjunto de datos.
# Count missing values per column and express them as a share of all rows.
missing_values = df_1.isna().sum()
total_rows = df_1.shape[0]
missing_vars = missing_values[missing_values > 0]  # keep only columns that actually have NaNs
missing_info_df = pd.DataFrame({
'Datos faltantes': missing_vars,
'Porcentaje (%)': round((missing_vars / total_rows) * 100,2)})
missing_info_df
| Datos faltantes | Porcentaje (%) | |
|---|---|---|
| Vol | 6 | 0.12 |
Se encontraron 6 datos faltantes en relación al volumen de Bitcoin transaccionado en los registros del conjunto de datos analizado. Procedemos a realizar el análisis de su distribución para escoger la mejor técnica de imputación.
Asimetría#
A continuación se estima el sesgo de cada atributo utilizando la función skew():
# Skew for each numeric attribute (the datetime column is excluded,
# since skew() is only meaningful for numeric data).
pd.set_option('display.precision', 4)
df_a = df_1.drop(columns=['Date'])
df_a.skew()
Price 1.6359
Open 1.6353
High 1.6380
Low 1.6303
Vol 18.6076
Change 22.6507
Close 1.6359
dtype: float64
Para evaluar si las variables presentan sesgo, se comparará el valor absoluto de sus coeficientes con un umbral, como 1.0. Se considera que los coeficientes más cercanos a cero indican un menor sesgo. Los valores de inclinación reflejan una tendencia positiva (hacia la derecha) o negativa (hacia la izquierda). En el caso de la variable Change, se observa un sesgo considerable hacia la derecha, mientras que las demás variables, incluyendo la Vol muestran un ligero sesgo también hacia la derecha, por tanto procedemos a imputar los datos faltantes con la mediana.
Imputación datos faltantes#
# Impute missing 'Vol' values with the median, which is robust to the
# heavy right skew observed in the skew() analysis above.
median_vol = df_1['Vol'].median()
# Assign the filled column back instead of fillna(inplace=True) on the
# column selection: chained-assignment inplace is deprecated and becomes
# a silent no-op under pandas copy-on-write (default in pandas 3.x).
df_1['Vol'] = df_1['Vol'].fillna(median_vol)
Verificamos que la imputación se haya realizado de forma correcta.
df_1.info()
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 4999 entries, 0 to 4998
Data columns (total 8 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 Date 4999 non-null datetime64[ns]
1 Price 4999 non-null float64
2 Open 4999 non-null float64
3 High 4999 non-null float64
4 Low 4999 non-null float64
5 Vol 4999 non-null float64
6 Change 4999 non-null float64
7 Close 4999 non-null float64
dtypes: datetime64[ns](1), float64(7)
memory usage: 312.6 KB
Se observa que no existen datos faltantes en la columna Vol
# Sort chronologically (the raw file is newest-first) and work on a copy
# so later mutations do not touch the sorted original.
df_1_sorted = df_1.sort_values(by='Date')
df_1_st = df_1_sorted.copy()
Construcción de Modelos estadísticos#
En esta sesión se realizará la construcción de los modelos predictivos relacionados a continuación, sobre las variables precio, retorno acumulado y volatilidad para las diferentes ventanas (\(\omega\)) de 7, 14, 21, 28 días (ver imagen).
MLP (Multilayer Perceptrón)
RNN (Must investigate)
LSTM (Long short-term memory)
Se requiere calcular el retorno acumulado y la volatilidad para esto se emplean las siguientes ecuaciones:
Retorno acumulado diario \(\{A_t\}_{t=1}^T\) donde:
donde \(P_t\text{, }t=1,2,3 \ldots , \text{T}\) es la columna Price en el dataset de Bitcoin y T es el tiempo final.
La Volatilidad \(\{\sigma\}_{t=1}^T\) es la desviación estándar de los retornos diarios.
siendo \(\omega\) el número de retornos diarios considerados para calcular la desviación estándar
obtenida por ventanas móviles de longitud \(\omega\), donde \(\omega\) esta dado en días, y esta desviación es calculada sobre la serie de retornos diarios.
A continuación se define función para calculo de retorno acumulado y volatilidad.
def calcular_retorno_acumulado(df):
    """Add daily-return (R_t) and cumulative-return (A_t) columns to df.

    R_t is the simple daily return of 'Price'; the first row, which has no
    previous price, is set to 0. A_t is the running sum of R_t. The input
    DataFrame is modified in place and also returned.

    Parameters:
        df: pandas DataFrame with a numeric 'Price' column.

    Returns:
        The same DataFrame with 'R_t' and 'A_t' added.
    """
    # Daily return R_t = (P_t - P_{t-1}) / P_{t-1}
    df['R_t'] = (df['Price'] - df['Price'].shift(1)) / df['Price'].shift(1)
    # Assign back instead of Series.fillna(inplace=True): chained-assignment
    # inplace is deprecated and is a silent no-op under pandas copy-on-write.
    df['R_t'] = df['R_t'].fillna(0)
    # Cumulative return A_t as the running sum of daily returns
    df['A_t'] = df['R_t'].cumsum()
    return df
def calcular_volatilidad(df, omegas):
    """Add rolling-volatility columns ('Volatilidad_<omega>') to df.

    Volatility is the rolling standard deviation of the daily returns R_t
    over each window length in `omegas` (given in days). R_t is computed
    from 'Price' first if it is not already present. Leading rows without
    a full window are set to 0. The DataFrame is modified in place and
    also returned.

    Parameters:
        df: pandas DataFrame with 'R_t' or a numeric 'Price' column.
        omegas: iterable of int window lengths.

    Returns:
        The same DataFrame with one 'Volatilidad_<omega>' column per window.
    """
    if 'R_t' not in df.columns:
        df['R_t'] = (df['Price'] - df['Price'].shift(1)) / df['Price'].shift(1)
        # First row has no previous price; assign back rather than using
        # the deprecated chained fillna(inplace=True).
        df['R_t'] = df['R_t'].fillna(0)
    for omega in omegas:
        col_name = f'Volatilidad_{omega}'
        # Rolling std over omega returns; fill the incomplete leading
        # windows with 0 in the same assignment (no chained inplace).
        df[col_name] = df['R_t'].rolling(window=omega).std().fillna(0)
    return df
A continuación se realiza indexación de parámetros sobre la función calcular_retorno_acumulado() y calcular_volatilidad().
# Enrich the sorted series with returns, then volatility for each window.
st_retorno =calcular_retorno_acumulado(df_1_st)
omegas = [7, 14, 21, 28] # List of rolling-window lengths (in days)
st_retorno_volatilidad = calcular_volatilidad(df_1_st, omegas)
A continuación se imprimen los resultados de los calculos anteriores.
df_1_st
| Date | Price | Open | High | Low | Vol | Change | Close | R_t | A_t | Volatilidad_7 | Volatilidad_14 | Volatilidad_21 | Volatilidad_28 | |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| 4998 | 2010-07-18 | 0.1 | 0.0 | 0.1 | 0.1 | 80.0 | 0.0000 | 0.0000 | 0.0000 | 0.0000 | 0.0000 | 0.0000 | 0.0000 | 0.0000 |
| 4997 | 2010-07-19 | 0.1 | 0.1 | 0.1 | 0.1 | 570.0 | 0.0000 | 0.1000 | 0.0000 | 0.0000 | 0.0000 | 0.0000 | 0.0000 | 0.0000 |
| 4996 | 2010-07-20 | 0.1 | 0.1 | 0.1 | 0.1 | 260.0 | 0.0000 | 0.1000 | 0.0000 | 0.0000 | 0.0000 | 0.0000 | 0.0000 | 0.0000 |
| 4995 | 2010-07-21 | 0.1 | 0.1 | 0.1 | 0.1 | 580.0 | 0.0000 | 0.1000 | 0.0000 | 0.0000 | 0.0000 | 0.0000 | 0.0000 | 0.0000 |
| 4994 | 2010-07-22 | 0.1 | 0.1 | 0.1 | 0.1 | 2160.0 | 0.0000 | 0.1000 | 0.0000 | 0.0000 | 0.0000 | 0.0000 | 0.0000 | 0.0000 |
| ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... |
| 4 | 2024-03-20 | 67854.0 | 62046.8 | 68029.5 | 60850.9 | 133530.0 | 0.0935 | 67848.1758 | 0.0935 | 23.7198 | 0.0607 | 0.0445 | 0.0441 | 0.0434 |
| 3 | 2024-03-21 | 65503.8 | 67860.0 | 68161.7 | 64616.1 | 75260.0 | -0.0346 | 65512.0440 | -0.0346 | 23.6852 | 0.0613 | 0.0455 | 0.0446 | 0.0440 |
| 2 | 2024-03-22 | 63785.5 | 65501.5 | 66633.3 | 62328.3 | 72430.0 | -0.0262 | 63785.3607 | -0.0262 | 23.6589 | 0.0613 | 0.0456 | 0.0449 | 0.0444 |
| 1 | 2024-03-23 | 64037.8 | 63785.6 | 65972.4 | 63074.9 | 35110.0 | 0.0040 | 64040.7424 | 0.0040 | 23.6629 | 0.0573 | 0.0456 | 0.0448 | 0.0444 |
| 0 | 2024-03-24 | 67211.9 | 64036.5 | 67587.8 | 63812.9 | 65590.0 | 0.0496 | 67212.7104 | 0.0496 | 23.7125 | 0.0577 | 0.0478 | 0.0459 | 0.0450 |
4999 rows × 14 columns
División de los datos#
A continuación se define función create_time_series_datasets()en Python para dividir el conjunto de datos en entrenamiento, validación y test teniendo en cuenta los horizontes: τ = 7,14,21,28 (data de validación y prueba). Al final debe unir las predicciones de cada punto para obtener el horizonte predicho completo de τ días, luego debe compararlo con el resultado usando como output el y multivariado de τ días, tal como se muestra en la Figura 5.
Para el conjunto de entrenamiento se consideran todas los casos asociados con las combinaciones:
def create_time_series_datasets(df, column_name, tau):
    """
    Build train/validation/test sliding-window datasets from a time series.

    Parameters:
        df: pandas DataFrame holding the series (or an array-like already).
        column_name: Column with the values (ignored if df is not a DataFrame).
        tau: Horizon in days; each X row holds the tau previous values and
             each y entry holds the value that immediately follows them.

    Returns:
        X_train, y_train, X_val, y_val, X_test, y_test as numpy arrays,
        with X_* of shape (m, tau) and y_* of shape (m, 1).

    Raises:
        ValueError: if the series has tau * 3 observations or fewer.

    NOTE(review): validation and test each take tau targets starting at
    index n - 3*tau, so the final tau observations of the series never
    appear as targets — confirm this offset is intentional.
    """
    # Work on a flat numpy array regardless of the input container.
    if isinstance(df, pd.DataFrame):
        df = df[column_name].values
    n = len(df)
    if n <= (tau * 3):
        raise ValueError("No hay suficientes datos para crear los conjuntos.")

    train_end = n - (tau * 3)  # exclusive upper bound of training targets

    def windows(start, stop):
        # One (tau-lag window, next value) pair per target index in [start, stop).
        xs = np.array([df[i - tau:i] for i in range(start, stop)])
        ys = np.array([df[i] for i in range(start, stop)]).reshape(-1, 1)
        return xs, ys

    X_train, y_train = windows(tau, train_end)
    X_val, y_val = windows(train_end, train_end + tau)
    X_test, y_test = windows(train_end + tau, train_end + 2 * tau)
    return X_train, y_train, X_val, y_val, X_test, y_test
A continuación se define función data_plot() para extraer los datos dependiendo del horizonte para realizar el gráfico de predicciones.
def data_plot(data_rn, horizonte):
    """Split a series into train/validation/test segments for plotting.

    The last 2 * horizonte rows are held out: the first `horizonte` of
    them form the validation segment and the final `horizonte` the test
    segment; everything before is training. Each slice keeps its original
    index and is squeezed to a Series when it has a single column.

    Parameters:
        data_rn: pandas DataFrame/Series with the full series.
        horizonte: Horizon length in days for validation and test.

    Returns:
        (data_train_plot, data_val_plot, data_test_plot)
    """
    n_train = len(data_rn) - (horizonte * 2)
    n_val_end = n_train + horizonte
    n_test_end = n_val_end + horizonte

    # Slice each segment by position, re-attach its index, and squeeze.
    segments = []
    for lo, hi in ((0, n_train), (n_train, n_val_end), (n_val_end, n_test_end)):
        part = data_rn.iloc[lo:hi]
        part.index = data_rn.index[lo:hi]
        segments.append(part.squeeze())
    data_train_plot, data_val_plot, data_test_plot = segments

    # Echo the segments for a quick visual check.
    print("Datos de entrenamiento:")
    print(f"{data_train_plot}{horizonte}")
    print("\nDatos de validación:")
    print(f"{data_val_plot}{horizonte}")
    print("\nDatos de prueba:")
    print(f"{data_test_plot}{horizonte}")
    return data_train_plot, data_val_plot, data_test_plot
A continuación se indexan los parámetros sobre la función create_time_series_datasets() para generar la división de los pliegues (folds) para el conjunto de entrenamiento, validación y test.
Precio (Price)#
Horizonte de 7 días (\(\tau=7\))
# Build the tau = 7 train/validation/test split for the 'Price' series.
tau7 = 7
X_train_price7, y_train_price7, X_val_price7, y_val_price7, X_test_price7, y_test_price7 = create_time_series_datasets(df_1_st,'Price',tau7)
# Print the dimensions of each resulting matrix as a sanity check
print("X_train shape:", X_train_price7.shape)
print("y_train shape:", y_train_price7.shape)
print("X_val shape:", X_val_price7.shape)
print("y_val shape:", y_val_price7.shape)
print("X_test shape:", X_test_price7.shape)
print("y_test shape:", y_test_price7.shape)
X_train shape: (4971, 7)
y_train shape: (4971, 1)
X_val shape: (7, 7)
y_val shape: (7, 1)
X_test shape: (7, 7)
y_test shape: (7, 1)
A continuación se puede visualizar el arreglo para cada una de las matrices generadas para los datos de entrenamiento, validación y prueba para un \(\tau = 7\).
X_train_price7
array([[1.00000e-01, 1.00000e-01, 1.00000e-01, ..., 1.00000e-01,
1.00000e-01, 1.00000e-01],
[1.00000e-01, 1.00000e-01, 1.00000e-01, ..., 1.00000e-01,
1.00000e-01, 1.00000e-01],
[1.00000e-01, 1.00000e-01, 1.00000e-01, ..., 1.00000e-01,
1.00000e-01, 1.00000e-01],
...,
[5.07405e+04, 5.15716e+04, 5.17227e+04, ..., 5.70562e+04,
6.24676e+04, 6.11693e+04],
[5.15716e+04, 5.17227e+04, 5.44951e+04, ..., 6.24676e+04,
6.11693e+04, 6.23977e+04],
[5.17227e+04, 5.44951e+04, 5.70562e+04, ..., 6.11693e+04,
6.23977e+04, 6.19945e+04]])
y_train_price7
array([[1.00000e-01],
[1.00000e-01],
[1.00000e-01],
...,
[6.23977e+04],
[6.19945e+04],
[6.31358e+04]])
X_val_price7
array([[54495.1, 57056.2, 62467.6, 61169.3, 62397.7, 61994.5, 63135.8],
[57056.2, 62467.6, 61169.3, 62397.7, 61994.5, 63135.8, 68270.1],
[62467.6, 61169.3, 62397.7, 61994.5, 63135.8, 68270.1, 63792.6],
[61169.3, 62397.7, 61994.5, 63135.8, 68270.1, 63792.6, 66080.4],
[62397.7, 61994.5, 63135.8, 68270.1, 63792.6, 66080.4, 66855.3],
[61994.5, 63135.8, 68270.1, 63792.6, 66080.4, 66855.3, 68172. ],
[63135.8, 68270.1, 63792.6, 66080.4, 66855.3, 68172. , 68366.5]])
y_val_price7
array([[68270.1],
[63792.6],
[66080.4],
[66855.3],
[68172. ],
[68366.5],
[68964.8]])
X_test_price7
array([[68270.1, 63792.6, 66080.4, 66855.3, 68172. , 68366.5, 68964.8],
[63792.6, 66080.4, 66855.3, 68172. , 68366.5, 68964.8, 72099.1],
[66080.4, 66855.3, 68172. , 68366.5, 68964.8, 72099.1, 71470.2],
[66855.3, 68172. , 68366.5, 68964.8, 72099.1, 71470.2, 73066.3],
[68172. , 68366.5, 68964.8, 72099.1, 71470.2, 73066.3, 71387.5],
[68366.5, 68964.8, 72099.1, 71470.2, 73066.3, 71387.5, 69463.7],
[68964.8, 72099.1, 71470.2, 73066.3, 71387.5, 69463.7, 65314.2]])
y_test_price7
array([[72099.1],
[71470.2],
[73066.3],
[71387.5],
[69463.7],
[65314.2],
[68391.2]])
Horizonte de 14 días (\(\tau=14\))
# Build the tau = 14 train/validation/test split for the 'Price' series.
tau14 = 14
X_train_price14, y_train_price14, X_val_price14, y_val_price14, X_test_price14, y_test_price14 = create_time_series_datasets(df_1_st,'Price',tau14)
# Print the dimensions of each resulting matrix as a sanity check
print("X_train shape:", X_train_price14.shape)
print("y_train shape:", y_train_price14.shape)
print("X_val shape:", X_val_price14.shape)
print("y_val shape:", y_val_price14.shape)
print("X_test shape:", X_test_price14.shape)
print("y_test shape:", y_test_price14.shape)
X_train shape: (4943, 14)
y_train shape: (4943, 1)
X_val shape: (14, 14)
y_val shape: (14, 1)
X_test shape: (14, 14)
y_test shape: (14, 1)
A continuación se puede visualizar el arreglo para cada una de las matrices generadas para los datos de entrenamiento, validación y prueba para un \(\tau = 14\).
X_train_price14
array([[1.00000e-01, 1.00000e-01, 1.00000e-01, ..., 1.00000e-01,
1.00000e-01, 1.00000e-01],
[1.00000e-01, 1.00000e-01, 1.00000e-01, ..., 1.00000e-01,
1.00000e-01, 1.00000e-01],
[1.00000e-01, 1.00000e-01, 1.00000e-01, ..., 1.00000e-01,
1.00000e-01, 1.00000e-01],
...,
[4.18113e+04, 4.21209e+04, 4.20307e+04, ..., 4.30877e+04,
4.43398e+04, 4.52933e+04],
[4.21209e+04, 4.20307e+04, 4.32998e+04, ..., 4.43398e+04,
4.52933e+04, 4.71275e+04],
[4.20307e+04, 4.32998e+04, 4.29462e+04, ..., 4.52933e+04,
4.71275e+04, 4.77582e+04]])
y_train_price14
array([[1.00000e-01],
[1.00000e-01],
[1.00000e-01],
...,
[4.71275e+04],
[4.77582e+04],
[4.82773e+04]])
X_val_price14
array([[43299.8, 42946.2, 42580.5, 43081.4, 43194.7, 43005.7, 42581.4,
42697.2, 43087.7, 44339.8, 45293.3, 47127.5, 47758.2, 48277.3],
[42946.2, 42580.5, 43081.4, 43194.7, 43005.7, 42581.4, 42697.2,
43087.7, 44339.8, 45293.3, 47127.5, 47758.2, 48277.3, 49941.3],
[42580.5, 43081.4, 43194.7, 43005.7, 42581.4, 42697.2, 43087.7,
44339.8, 45293.3, 47127.5, 47758.2, 48277.3, 49941.3, 49716. ],
[43081.4, 43194.7, 43005.7, 42581.4, 42697.2, 43087.7, 44339.8,
45293.3, 47127.5, 47758.2, 48277.3, 49941.3, 49716. , 51782.4],
[43194.7, 43005.7, 42581.4, 42697.2, 43087.7, 44339.8, 45293.3,
47127.5, 47758.2, 48277.3, 49941.3, 49716. , 51782.4, 51901.3],
[43005.7, 42581.4, 42697.2, 43087.7, 44339.8, 45293.3, 47127.5,
47758.2, 48277.3, 49941.3, 49716. , 51782.4, 51901.3, 52134.2],
[42581.4, 42697.2, 43087.7, 44339.8, 45293.3, 47127.5, 47758.2,
48277.3, 49941.3, 49716. , 51782.4, 51901.3, 52134.2, 51646. ],
[42697.2, 43087.7, 44339.8, 45293.3, 47127.5, 47758.2, 48277.3,
49941.3, 49716. , 51782.4, 51901.3, 52134.2, 51646. , 52117.5],
[43087.7, 44339.8, 45293.3, 47127.5, 47758.2, 48277.3, 49941.3,
49716. , 51782.4, 51901.3, 52134.2, 51646. , 52117.5, 51783.6],
[44339.8, 45293.3, 47127.5, 47758.2, 48277.3, 49941.3, 49716. ,
51782.4, 51901.3, 52134.2, 51646. , 52117.5, 51783.6, 52263.5],
[45293.3, 47127.5, 47758.2, 48277.3, 49941.3, 49716. , 51782.4,
51901.3, 52134.2, 51646. , 52117.5, 51783.6, 52263.5, 51858.2],
[47127.5, 47758.2, 48277.3, 49941.3, 49716. , 51782.4, 51901.3,
52134.2, 51646. , 52117.5, 51783.6, 52263.5, 51858.2, 51320.4],
[47758.2, 48277.3, 49941.3, 49716. , 51782.4, 51901.3, 52134.2,
51646. , 52117.5, 51783.6, 52263.5, 51858.2, 51320.4, 50740.5],
[48277.3, 49941.3, 49716. , 51782.4, 51901.3, 52134.2, 51646. ,
52117.5, 51783.6, 52263.5, 51858.2, 51320.4, 50740.5, 51571.6]])
y_val_price14
array([[49941.3],
[49716. ],
[51782.4],
[51901.3],
[52134.2],
[51646. ],
[52117.5],
[51783.6],
[52263.5],
[51858.2],
[51320.4],
[50740.5],
[51571.6],
[51722.7]])
y_test_price14
array([[54495.1],
[57056.2],
[62467.6],
[61169.3],
[62397.7],
[61994.5],
[63135.8],
[68270.1],
[63792.6],
[66080.4],
[66855.3],
[68172. ],
[68366.5],
[68964.8]])
X_test_price14
array([[49941.3, 49716. , 51782.4, 51901.3, 52134.2, 51646. , 52117.5,
51783.6, 52263.5, 51858.2, 51320.4, 50740.5, 51571.6, 51722.7],
[49716. , 51782.4, 51901.3, 52134.2, 51646. , 52117.5, 51783.6,
52263.5, 51858.2, 51320.4, 50740.5, 51571.6, 51722.7, 54495.1],
[51782.4, 51901.3, 52134.2, 51646. , 52117.5, 51783.6, 52263.5,
51858.2, 51320.4, 50740.5, 51571.6, 51722.7, 54495.1, 57056.2],
[51901.3, 52134.2, 51646. , 52117.5, 51783.6, 52263.5, 51858.2,
51320.4, 50740.5, 51571.6, 51722.7, 54495.1, 57056.2, 62467.6],
[52134.2, 51646. , 52117.5, 51783.6, 52263.5, 51858.2, 51320.4,
50740.5, 51571.6, 51722.7, 54495.1, 57056.2, 62467.6, 61169.3],
[51646. , 52117.5, 51783.6, 52263.5, 51858.2, 51320.4, 50740.5,
51571.6, 51722.7, 54495.1, 57056.2, 62467.6, 61169.3, 62397.7],
[52117.5, 51783.6, 52263.5, 51858.2, 51320.4, 50740.5, 51571.6,
51722.7, 54495.1, 57056.2, 62467.6, 61169.3, 62397.7, 61994.5],
[51783.6, 52263.5, 51858.2, 51320.4, 50740.5, 51571.6, 51722.7,
54495.1, 57056.2, 62467.6, 61169.3, 62397.7, 61994.5, 63135.8],
[52263.5, 51858.2, 51320.4, 50740.5, 51571.6, 51722.7, 54495.1,
57056.2, 62467.6, 61169.3, 62397.7, 61994.5, 63135.8, 68270.1],
[51858.2, 51320.4, 50740.5, 51571.6, 51722.7, 54495.1, 57056.2,
62467.6, 61169.3, 62397.7, 61994.5, 63135.8, 68270.1, 63792.6],
[51320.4, 50740.5, 51571.6, 51722.7, 54495.1, 57056.2, 62467.6,
61169.3, 62397.7, 61994.5, 63135.8, 68270.1, 63792.6, 66080.4],
[50740.5, 51571.6, 51722.7, 54495.1, 57056.2, 62467.6, 61169.3,
62397.7, 61994.5, 63135.8, 68270.1, 63792.6, 66080.4, 66855.3],
[51571.6, 51722.7, 54495.1, 57056.2, 62467.6, 61169.3, 62397.7,
61994.5, 63135.8, 68270.1, 63792.6, 66080.4, 66855.3, 68172. ],
[51722.7, 54495.1, 57056.2, 62467.6, 61169.3, 62397.7, 61994.5,
63135.8, 68270.1, 63792.6, 66080.4, 66855.3, 68172. , 68366.5]])
Horizonte de 21 días (\(\tau=21\))
# Build the tau = 21 train/validation/test split for the 'Price' series.
tau21 = 21
X_train_price21, y_train_price21, X_val_price21, y_val_price21, X_test_price21, y_test_price21 = create_time_series_datasets(df_1_st,'Price',tau21)
# Print the dimensions of each resulting matrix as a sanity check
print("X_train shape:", X_train_price21.shape)
print("y_train shape:", y_train_price21.shape)
print("X_val shape:", X_val_price21.shape)
print("y_val shape:", y_val_price21.shape)
print("X_test shape:", X_test_price21.shape)
print("y_test shape:", y_test_price21.shape)
X_train shape: (4915, 21)
y_train shape: (4915, 1)
X_val shape: (21, 21)
y_val shape: (21, 1)
X_test shape: (21, 21)
y_test shape: (21, 1)
A continuación se puede visualizar el arreglo para cada una de las matrices generadas para los datos de entrenamiento, validación y prueba para un \(\tau = 21\).
X_train_price21
array([[1.00000e-01, 1.00000e-01, 1.00000e-01, ..., 1.00000e-01,
1.00000e-01, 1.00000e-01],
[1.00000e-01, 1.00000e-01, 1.00000e-01, ..., 1.00000e-01,
1.00000e-01, 1.00000e-01],
[1.00000e-01, 1.00000e-01, 1.00000e-01, ..., 1.00000e-01,
1.00000e-01, 1.00000e-01],
...,
[4.20724e+04, 4.21367e+04, 4.22725e+04, ..., 4.31455e+04,
4.27687e+04, 4.12927e+04],
[4.21367e+04, 4.22725e+04, 4.41834e+04, ..., 4.27687e+04,
4.12927e+04, 4.16480e+04],
[4.22725e+04, 4.41834e+04, 4.49437e+04, ..., 4.12927e+04,
4.16480e+04, 4.16954e+04]])
y_train_price21
array([[1.00000e-01],
[1.00000e-01],
[1.00000e-01],
...,
[4.16480e+04],
[4.16954e+04],
[4.15832e+04]])
X_val_price21
array([[44183.4, 44943.7, 42836.1, 44157. , 44156.9, 43967.9, 43927.3,
46962.2, 46129. , 46629.3, 46348.2, 42835.9, 42851.3, 41746.1,
42510.7, 43145.5, 42768.7, 41292.7, 41648. , 41695.4, 41583.2],
[44943.7, 42836.1, 44157. , 44156.9, 43967.9, 43927.3, 46962.2,
46129. , 46629.3, 46348.2, 42835.9, 42851.3, 41746.1, 42510.7,
43145.5, 42768.7, 41292.7, 41648. , 41695.4, 41583.2, 39556.4],
[42836.1, 44157. , 44156.9, 43967.9, 43927.3, 46962.2, 46129. ,
46629.3, 46348.2, 42835.9, 42851.3, 41746.1, 42510.7, 43145.5,
42768.7, 41292.7, 41648. , 41695.4, 41583.2, 39556.4, 39888.8],
[44157. , 44156.9, 43967.9, 43927.3, 46962.2, 46129. , 46629.3,
46348.2, 42835.9, 42851.3, 41746.1, 42510.7, 43145.5, 42768.7,
41292.7, 41648. , 41695.4, 41583.2, 39556.4, 39888.8, 40086. ],
[44156.9, 43967.9, 43927.3, 46962.2, 46129. , 46629.3, 46348.2,
42835.9, 42851.3, 41746.1, 42510.7, 43145.5, 42768.7, 41292.7,
41648. , 41695.4, 41583.2, 39556.4, 39888.8, 40086. , 39935.7],
[43967.9, 43927.3, 46962.2, 46129. , 46629.3, 46348.2, 42835.9,
42851.3, 41746.1, 42510.7, 43145.5, 42768.7, 41292.7, 41648. ,
41695.4, 41583.2, 39556.4, 39888.8, 40086. , 39935.7, 41811.3],
[43927.3, 46962.2, 46129. , 46629.3, 46348.2, 42835.9, 42851.3,
41746.1, 42510.7, 43145.5, 42768.7, 41292.7, 41648. , 41695.4,
41583.2, 39556.4, 39888.8, 40086. , 39935.7, 41811.3, 42120.9],
[46962.2, 46129. , 46629.3, 46348.2, 42835.9, 42851.3, 41746.1,
42510.7, 43145.5, 42768.7, 41292.7, 41648. , 41695.4, 41583.2,
39556.4, 39888.8, 40086. , 39935.7, 41811.3, 42120.9, 42030.7],
[46129. , 46629.3, 46348.2, 42835.9, 42851.3, 41746.1, 42510.7,
43145.5, 42768.7, 41292.7, 41648. , 41695.4, 41583.2, 39556.4,
39888.8, 40086. , 39935.7, 41811.3, 42120.9, 42030.7, 43299.8],
[46629.3, 46348.2, 42835.9, 42851.3, 41746.1, 42510.7, 43145.5,
42768.7, 41292.7, 41648. , 41695.4, 41583.2, 39556.4, 39888.8,
40086. , 39935.7, 41811.3, 42120.9, 42030.7, 43299.8, 42946.2],
[46348.2, 42835.9, 42851.3, 41746.1, 42510.7, 43145.5, 42768.7,
41292.7, 41648. , 41695.4, 41583.2, 39556.4, 39888.8, 40086. ,
39935.7, 41811.3, 42120.9, 42030.7, 43299.8, 42946.2, 42580.5],
[42835.9, 42851.3, 41746.1, 42510.7, 43145.5, 42768.7, 41292.7,
41648. , 41695.4, 41583.2, 39556.4, 39888.8, 40086. , 39935.7,
41811.3, 42120.9, 42030.7, 43299.8, 42946.2, 42580.5, 43081.4],
[42851.3, 41746.1, 42510.7, 43145.5, 42768.7, 41292.7, 41648. ,
41695.4, 41583.2, 39556.4, 39888.8, 40086. , 39935.7, 41811.3,
42120.9, 42030.7, 43299.8, 42946.2, 42580.5, 43081.4, 43194.7],
[41746.1, 42510.7, 43145.5, 42768.7, 41292.7, 41648. , 41695.4,
41583.2, 39556.4, 39888.8, 40086. , 39935.7, 41811.3, 42120.9,
42030.7, 43299.8, 42946.2, 42580.5, 43081.4, 43194.7, 43005.7],
[42510.7, 43145.5, 42768.7, 41292.7, 41648. , 41695.4, 41583.2,
39556.4, 39888.8, 40086. , 39935.7, 41811.3, 42120.9, 42030.7,
43299.8, 42946.2, 42580.5, 43081.4, 43194.7, 43005.7, 42581.4],
[43145.5, 42768.7, 41292.7, 41648. , 41695.4, 41583.2, 39556.4,
39888.8, 40086. , 39935.7, 41811.3, 42120.9, 42030.7, 43299.8,
42946.2, 42580.5, 43081.4, 43194.7, 43005.7, 42581.4, 42697.2],
[42768.7, 41292.7, 41648. , 41695.4, 41583.2, 39556.4, 39888.8,
40086. , 39935.7, 41811.3, 42120.9, 42030.7, 43299.8, 42946.2,
42580.5, 43081.4, 43194.7, 43005.7, 42581.4, 42697.2, 43087.7],
[41292.7, 41648. , 41695.4, 41583.2, 39556.4, 39888.8, 40086. ,
39935.7, 41811.3, 42120.9, 42030.7, 43299.8, 42946.2, 42580.5,
43081.4, 43194.7, 43005.7, 42581.4, 42697.2, 43087.7, 44339.8],
[41648. , 41695.4, 41583.2, 39556.4, 39888.8, 40086. , 39935.7,
41811.3, 42120.9, 42030.7, 43299.8, 42946.2, 42580.5, 43081.4,
43194.7, 43005.7, 42581.4, 42697.2, 43087.7, 44339.8, 45293.3],
[41695.4, 41583.2, 39556.4, 39888.8, 40086. , 39935.7, 41811.3,
42120.9, 42030.7, 43299.8, 42946.2, 42580.5, 43081.4, 43194.7,
43005.7, 42581.4, 42697.2, 43087.7, 44339.8, 45293.3, 47127.5],
[41583.2, 39556.4, 39888.8, 40086. , 39935.7, 41811.3, 42120.9,
42030.7, 43299.8, 42946.2, 42580.5, 43081.4, 43194.7, 43005.7,
42581.4, 42697.2, 43087.7, 44339.8, 45293.3, 47127.5, 47758.2]])
y_val_price21
array([[39556.4],
[39888.8],
[40086. ],
[39935.7],
[41811.3],
[42120.9],
[42030.7],
[43299.8],
[42946.2],
[42580.5],
[43081.4],
[43194.7],
[43005.7],
[42581.4],
[42697.2],
[43087.7],
[44339.8],
[45293.3],
[47127.5],
[47758.2],
[48277.3]])
X_test_price21
array([[39556.4, 39888.8, 40086. , 39935.7, 41811.3, 42120.9, 42030.7,
43299.8, 42946.2, 42580.5, 43081.4, 43194.7, 43005.7, 42581.4,
42697.2, 43087.7, 44339.8, 45293.3, 47127.5, 47758.2, 48277.3],
[39888.8, 40086. , 39935.7, 41811.3, 42120.9, 42030.7, 43299.8,
42946.2, 42580.5, 43081.4, 43194.7, 43005.7, 42581.4, 42697.2,
43087.7, 44339.8, 45293.3, 47127.5, 47758.2, 48277.3, 49941.3],
[40086. , 39935.7, 41811.3, 42120.9, 42030.7, 43299.8, 42946.2,
42580.5, 43081.4, 43194.7, 43005.7, 42581.4, 42697.2, 43087.7,
44339.8, 45293.3, 47127.5, 47758.2, 48277.3, 49941.3, 49716. ],
[39935.7, 41811.3, 42120.9, 42030.7, 43299.8, 42946.2, 42580.5,
43081.4, 43194.7, 43005.7, 42581.4, 42697.2, 43087.7, 44339.8,
45293.3, 47127.5, 47758.2, 48277.3, 49941.3, 49716. , 51782.4],
[41811.3, 42120.9, 42030.7, 43299.8, 42946.2, 42580.5, 43081.4,
43194.7, 43005.7, 42581.4, 42697.2, 43087.7, 44339.8, 45293.3,
47127.5, 47758.2, 48277.3, 49941.3, 49716. , 51782.4, 51901.3],
[42120.9, 42030.7, 43299.8, 42946.2, 42580.5, 43081.4, 43194.7,
43005.7, 42581.4, 42697.2, 43087.7, 44339.8, 45293.3, 47127.5,
47758.2, 48277.3, 49941.3, 49716. , 51782.4, 51901.3, 52134.2],
[42030.7, 43299.8, 42946.2, 42580.5, 43081.4, 43194.7, 43005.7,
42581.4, 42697.2, 43087.7, 44339.8, 45293.3, 47127.5, 47758.2,
48277.3, 49941.3, 49716. , 51782.4, 51901.3, 52134.2, 51646. ],
[43299.8, 42946.2, 42580.5, 43081.4, 43194.7, 43005.7, 42581.4,
42697.2, 43087.7, 44339.8, 45293.3, 47127.5, 47758.2, 48277.3,
49941.3, 49716. , 51782.4, 51901.3, 52134.2, 51646. , 52117.5],
[42946.2, 42580.5, 43081.4, 43194.7, 43005.7, 42581.4, 42697.2,
43087.7, 44339.8, 45293.3, 47127.5, 47758.2, 48277.3, 49941.3,
49716. , 51782.4, 51901.3, 52134.2, 51646. , 52117.5, 51783.6],
[42580.5, 43081.4, 43194.7, 43005.7, 42581.4, 42697.2, 43087.7,
44339.8, 45293.3, 47127.5, 47758.2, 48277.3, 49941.3, 49716. ,
51782.4, 51901.3, 52134.2, 51646. , 52117.5, 51783.6, 52263.5],
[43081.4, 43194.7, 43005.7, 42581.4, 42697.2, 43087.7, 44339.8,
45293.3, 47127.5, 47758.2, 48277.3, 49941.3, 49716. , 51782.4,
51901.3, 52134.2, 51646. , 52117.5, 51783.6, 52263.5, 51858.2],
[43194.7, 43005.7, 42581.4, 42697.2, 43087.7, 44339.8, 45293.3,
47127.5, 47758.2, 48277.3, 49941.3, 49716. , 51782.4, 51901.3,
52134.2, 51646. , 52117.5, 51783.6, 52263.5, 51858.2, 51320.4],
[43005.7, 42581.4, 42697.2, 43087.7, 44339.8, 45293.3, 47127.5,
47758.2, 48277.3, 49941.3, 49716. , 51782.4, 51901.3, 52134.2,
51646. , 52117.5, 51783.6, 52263.5, 51858.2, 51320.4, 50740.5],
[42581.4, 42697.2, 43087.7, 44339.8, 45293.3, 47127.5, 47758.2,
48277.3, 49941.3, 49716. , 51782.4, 51901.3, 52134.2, 51646. ,
52117.5, 51783.6, 52263.5, 51858.2, 51320.4, 50740.5, 51571.6],
[42697.2, 43087.7, 44339.8, 45293.3, 47127.5, 47758.2, 48277.3,
49941.3, 49716. , 51782.4, 51901.3, 52134.2, 51646. , 52117.5,
51783.6, 52263.5, 51858.2, 51320.4, 50740.5, 51571.6, 51722.7],
[43087.7, 44339.8, 45293.3, 47127.5, 47758.2, 48277.3, 49941.3,
49716. , 51782.4, 51901.3, 52134.2, 51646. , 52117.5, 51783.6,
52263.5, 51858.2, 51320.4, 50740.5, 51571.6, 51722.7, 54495.1],
[44339.8, 45293.3, 47127.5, 47758.2, 48277.3, 49941.3, 49716. ,
51782.4, 51901.3, 52134.2, 51646. , 52117.5, 51783.6, 52263.5,
51858.2, 51320.4, 50740.5, 51571.6, 51722.7, 54495.1, 57056.2],
[45293.3, 47127.5, 47758.2, 48277.3, 49941.3, 49716. , 51782.4,
51901.3, 52134.2, 51646. , 52117.5, 51783.6, 52263.5, 51858.2,
51320.4, 50740.5, 51571.6, 51722.7, 54495.1, 57056.2, 62467.6],
[47127.5, 47758.2, 48277.3, 49941.3, 49716. , 51782.4, 51901.3,
52134.2, 51646. , 52117.5, 51783.6, 52263.5, 51858.2, 51320.4,
50740.5, 51571.6, 51722.7, 54495.1, 57056.2, 62467.6, 61169.3],
[47758.2, 48277.3, 49941.3, 49716. , 51782.4, 51901.3, 52134.2,
51646. , 52117.5, 51783.6, 52263.5, 51858.2, 51320.4, 50740.5,
51571.6, 51722.7, 54495.1, 57056.2, 62467.6, 61169.3, 62397.7],
[48277.3, 49941.3, 49716. , 51782.4, 51901.3, 52134.2, 51646. ,
52117.5, 51783.6, 52263.5, 51858.2, 51320.4, 50740.5, 51571.6,
51722.7, 54495.1, 57056.2, 62467.6, 61169.3, 62397.7, 61994.5]])
y_test_price21
array([[49941.3],
[49716. ],
[51782.4],
[51901.3],
[52134.2],
[51646. ],
[52117.5],
[51783.6],
[52263.5],
[51858.2],
[51320.4],
[50740.5],
[51571.6],
[51722.7],
[54495.1],
[57056.2],
[62467.6],
[61169.3],
[62397.7],
[61994.5],
[63135.8]])
Horizonte de 28 días (\(\tau=28\))
# Forecast horizon of 28 days for the 'Price' series.
tau28 = 28
# Build the train / validation / test windows for this horizon.
X_train_price28, y_train_price28, X_val_price28, y_val_price28, X_test_price28, y_test_price28 = create_time_series_datasets(df_1_st, 'Price', tau28)
# Report the dimensions of every generated split.
for _label, _arr in (("X_train", X_train_price28), ("y_train", y_train_price28),
                     ("X_val", X_val_price28), ("y_val", y_val_price28),
                     ("X_test", X_test_price28), ("y_test", y_test_price28)):
    print(f"{_label} shape:", _arr.shape)
X_train shape: (4887, 28)
y_train shape: (4887, 1)
X_val shape: (28, 28)
y_val shape: (28, 1)
X_test shape: (28, 28)
y_test shape: (28, 1)
A continuación se puede visualizar el arreglo para cada una de las matrices generadas para los datos de entrenamiento, validación y prueba para un \(\tau = 28\).
X_train_price28
array([[1.00000e-01, 1.00000e-01, 1.00000e-01, ..., 1.00000e-01,
1.00000e-01, 1.00000e-01],
[1.00000e-01, 1.00000e-01, 1.00000e-01, ..., 1.00000e-01,
1.00000e-01, 1.00000e-01],
[1.00000e-01, 1.00000e-01, 1.00000e-01, ..., 1.00000e-01,
1.00000e-01, 1.00000e-01],
...,
[3.86882e+04, 3.94584e+04, 3.99702e+04, ..., 4.25133e+04,
4.34465e+04, 4.25811e+04],
[3.94584e+04, 3.99702e+04, 4.19878e+04, ..., 4.34465e+04,
4.25811e+04, 4.20724e+04],
[3.99702e+04, 4.19878e+04, 4.40762e+04, ..., 4.25811e+04,
4.20724e+04, 4.21367e+04]])
y_train_price28
array([[1.00000e-01],
[1.00000e-01],
[1.00000e-01],
...,
[4.20724e+04],
[4.21367e+04],
[4.22725e+04]])
X_val_price28
array([[41987.8, 44076.2, 43776.3, 43289.7, 44175.5, 43718.4, 43791. ,
41256.1, 41487. , 42884.5, 43025.9, 41929. , 42271.7, 41368.7,
42659.7, 42259.3, 43662.8, 43865.9, 43968.9, 43710.4, 42981.5,
43578.5, 42513.3, 43446.5, 42581.1, 42072.4, 42136.7, 42272.5],
[44076.2, 43776.3, 43289.7, 44175.5, 43718.4, 43791. , 41256.1,
41487. , 42884.5, 43025.9, 41929. , 42271.7, 41368.7, 42659.7,
42259.3, 43662.8, 43865.9, 43968.9, 43710.4, 42981.5, 43578.5,
42513.3, 43446.5, 42581.1, 42072.4, 42136.7, 42272.5, 44183.4],
[43776.3, 43289.7, 44175.5, 43718.4, 43791. , 41256.1, 41487. ,
42884.5, 43025.9, 41929. , 42271.7, 41368.7, 42659.7, 42259.3,
43662.8, 43865.9, 43968.9, 43710.4, 42981.5, 43578.5, 42513.3,
43446.5, 42581.1, 42072.4, 42136.7, 42272.5, 44183.4, 44943.7],
[43289.7, 44175.5, 43718.4, 43791. , 41256.1, 41487. , 42884.5,
43025.9, 41929. , 42271.7, 41368.7, 42659.7, 42259.3, 43662.8,
43865.9, 43968.9, 43710.4, 42981.5, 43578.5, 42513.3, 43446.5,
42581.1, 42072.4, 42136.7, 42272.5, 44183.4, 44943.7, 42836.1],
[44175.5, 43718.4, 43791. , 41256.1, 41487. , 42884.5, 43025.9,
41929. , 42271.7, 41368.7, 42659.7, 42259.3, 43662.8, 43865.9,
43968.9, 43710.4, 42981.5, 43578.5, 42513.3, 43446.5, 42581.1,
42072.4, 42136.7, 42272.5, 44183.4, 44943.7, 42836.1, 44157. ],
[43718.4, 43791. , 41256.1, 41487. , 42884.5, 43025.9, 41929. ,
42271.7, 41368.7, 42659.7, 42259.3, 43662.8, 43865.9, 43968.9,
43710.4, 42981.5, 43578.5, 42513.3, 43446.5, 42581.1, 42072.4,
42136.7, 42272.5, 44183.4, 44943.7, 42836.1, 44157. , 44156.9],
[43791. , 41256.1, 41487. , 42884.5, 43025.9, 41929. , 42271.7,
41368.7, 42659.7, 42259.3, 43662.8, 43865.9, 43968.9, 43710.4,
42981.5, 43578.5, 42513.3, 43446.5, 42581.1, 42072.4, 42136.7,
42272.5, 44183.4, 44943.7, 42836.1, 44157. , 44156.9, 43967.9],
[41256.1, 41487. , 42884.5, 43025.9, 41929. , 42271.7, 41368.7,
42659.7, 42259.3, 43662.8, 43865.9, 43968.9, 43710.4, 42981.5,
43578.5, 42513.3, 43446.5, 42581.1, 42072.4, 42136.7, 42272.5,
44183.4, 44943.7, 42836.1, 44157. , 44156.9, 43967.9, 43927.3],
[41487. , 42884.5, 43025.9, 41929. , 42271.7, 41368.7, 42659.7,
42259.3, 43662.8, 43865.9, 43968.9, 43710.4, 42981.5, 43578.5,
42513.3, 43446.5, 42581.1, 42072.4, 42136.7, 42272.5, 44183.4,
44943.7, 42836.1, 44157. , 44156.9, 43967.9, 43927.3, 46962.2],
[42884.5, 43025.9, 41929. , 42271.7, 41368.7, 42659.7, 42259.3,
43662.8, 43865.9, 43968.9, 43710.4, 42981.5, 43578.5, 42513.3,
43446.5, 42581.1, 42072.4, 42136.7, 42272.5, 44183.4, 44943.7,
42836.1, 44157. , 44156.9, 43967.9, 43927.3, 46962.2, 46129. ],
[43025.9, 41929. , 42271.7, 41368.7, 42659.7, 42259.3, 43662.8,
43865.9, 43968.9, 43710.4, 42981.5, 43578.5, 42513.3, 43446.5,
42581.1, 42072.4, 42136.7, 42272.5, 44183.4, 44943.7, 42836.1,
44157. , 44156.9, 43967.9, 43927.3, 46962.2, 46129. , 46629.3],
[41929. , 42271.7, 41368.7, 42659.7, 42259.3, 43662.8, 43865.9,
43968.9, 43710.4, 42981.5, 43578.5, 42513.3, 43446.5, 42581.1,
42072.4, 42136.7, 42272.5, 44183.4, 44943.7, 42836.1, 44157. ,
44156.9, 43967.9, 43927.3, 46962.2, 46129. , 46629.3, 46348.2],
[42271.7, 41368.7, 42659.7, 42259.3, 43662.8, 43865.9, 43968.9,
43710.4, 42981.5, 43578.5, 42513.3, 43446.5, 42581.1, 42072.4,
42136.7, 42272.5, 44183.4, 44943.7, 42836.1, 44157. , 44156.9,
43967.9, 43927.3, 46962.2, 46129. , 46629.3, 46348.2, 42835.9],
[41368.7, 42659.7, 42259.3, 43662.8, 43865.9, 43968.9, 43710.4,
42981.5, 43578.5, 42513.3, 43446.5, 42581.1, 42072.4, 42136.7,
42272.5, 44183.4, 44943.7, 42836.1, 44157. , 44156.9, 43967.9,
43927.3, 46962.2, 46129. , 46629.3, 46348.2, 42835.9, 42851.3],
[42659.7, 42259.3, 43662.8, 43865.9, 43968.9, 43710.4, 42981.5,
43578.5, 42513.3, 43446.5, 42581.1, 42072.4, 42136.7, 42272.5,
44183.4, 44943.7, 42836.1, 44157. , 44156.9, 43967.9, 43927.3,
46962.2, 46129. , 46629.3, 46348.2, 42835.9, 42851.3, 41746.1],
[42259.3, 43662.8, 43865.9, 43968.9, 43710.4, 42981.5, 43578.5,
42513.3, 43446.5, 42581.1, 42072.4, 42136.7, 42272.5, 44183.4,
44943.7, 42836.1, 44157. , 44156.9, 43967.9, 43927.3, 46962.2,
46129. , 46629.3, 46348.2, 42835.9, 42851.3, 41746.1, 42510.7],
[43662.8, 43865.9, 43968.9, 43710.4, 42981.5, 43578.5, 42513.3,
43446.5, 42581.1, 42072.4, 42136.7, 42272.5, 44183.4, 44943.7,
42836.1, 44157. , 44156.9, 43967.9, 43927.3, 46962.2, 46129. ,
46629.3, 46348.2, 42835.9, 42851.3, 41746.1, 42510.7, 43145.5],
[43865.9, 43968.9, 43710.4, 42981.5, 43578.5, 42513.3, 43446.5,
42581.1, 42072.4, 42136.7, 42272.5, 44183.4, 44943.7, 42836.1,
44157. , 44156.9, 43967.9, 43927.3, 46962.2, 46129. , 46629.3,
46348.2, 42835.9, 42851.3, 41746.1, 42510.7, 43145.5, 42768.7],
[43968.9, 43710.4, 42981.5, 43578.5, 42513.3, 43446.5, 42581.1,
42072.4, 42136.7, 42272.5, 44183.4, 44943.7, 42836.1, 44157. ,
44156.9, 43967.9, 43927.3, 46962.2, 46129. , 46629.3, 46348.2,
42835.9, 42851.3, 41746.1, 42510.7, 43145.5, 42768.7, 41292.7],
[43710.4, 42981.5, 43578.5, 42513.3, 43446.5, 42581.1, 42072.4,
42136.7, 42272.5, 44183.4, 44943.7, 42836.1, 44157. , 44156.9,
43967.9, 43927.3, 46962.2, 46129. , 46629.3, 46348.2, 42835.9,
42851.3, 41746.1, 42510.7, 43145.5, 42768.7, 41292.7, 41648. ],
[42981.5, 43578.5, 42513.3, 43446.5, 42581.1, 42072.4, 42136.7,
42272.5, 44183.4, 44943.7, 42836.1, 44157. , 44156.9, 43967.9,
43927.3, 46962.2, 46129. , 46629.3, 46348.2, 42835.9, 42851.3,
41746.1, 42510.7, 43145.5, 42768.7, 41292.7, 41648. , 41695.4],
[43578.5, 42513.3, 43446.5, 42581.1, 42072.4, 42136.7, 42272.5,
44183.4, 44943.7, 42836.1, 44157. , 44156.9, 43967.9, 43927.3,
46962.2, 46129. , 46629.3, 46348.2, 42835.9, 42851.3, 41746.1,
42510.7, 43145.5, 42768.7, 41292.7, 41648. , 41695.4, 41583.2],
[42513.3, 43446.5, 42581.1, 42072.4, 42136.7, 42272.5, 44183.4,
44943.7, 42836.1, 44157. , 44156.9, 43967.9, 43927.3, 46962.2,
46129. , 46629.3, 46348.2, 42835.9, 42851.3, 41746.1, 42510.7,
43145.5, 42768.7, 41292.7, 41648. , 41695.4, 41583.2, 39556.4],
[43446.5, 42581.1, 42072.4, 42136.7, 42272.5, 44183.4, 44943.7,
42836.1, 44157. , 44156.9, 43967.9, 43927.3, 46962.2, 46129. ,
46629.3, 46348.2, 42835.9, 42851.3, 41746.1, 42510.7, 43145.5,
42768.7, 41292.7, 41648. , 41695.4, 41583.2, 39556.4, 39888.8],
[42581.1, 42072.4, 42136.7, 42272.5, 44183.4, 44943.7, 42836.1,
44157. , 44156.9, 43967.9, 43927.3, 46962.2, 46129. , 46629.3,
46348.2, 42835.9, 42851.3, 41746.1, 42510.7, 43145.5, 42768.7,
41292.7, 41648. , 41695.4, 41583.2, 39556.4, 39888.8, 40086. ],
[42072.4, 42136.7, 42272.5, 44183.4, 44943.7, 42836.1, 44157. ,
44156.9, 43967.9, 43927.3, 46962.2, 46129. , 46629.3, 46348.2,
42835.9, 42851.3, 41746.1, 42510.7, 43145.5, 42768.7, 41292.7,
41648. , 41695.4, 41583.2, 39556.4, 39888.8, 40086. , 39935.7],
[42136.7, 42272.5, 44183.4, 44943.7, 42836.1, 44157. , 44156.9,
43967.9, 43927.3, 46962.2, 46129. , 46629.3, 46348.2, 42835.9,
42851.3, 41746.1, 42510.7, 43145.5, 42768.7, 41292.7, 41648. ,
41695.4, 41583.2, 39556.4, 39888.8, 40086. , 39935.7, 41811.3],
[42272.5, 44183.4, 44943.7, 42836.1, 44157. , 44156.9, 43967.9,
43927.3, 46962.2, 46129. , 46629.3, 46348.2, 42835.9, 42851.3,
41746.1, 42510.7, 43145.5, 42768.7, 41292.7, 41648. , 41695.4,
41583.2, 39556.4, 39888.8, 40086. , 39935.7, 41811.3, 42120.9]])
y_val_price28
array([[44183.4],
[44943.7],
[42836.1],
[44157. ],
[44156.9],
[43967.9],
[43927.3],
[46962.2],
[46129. ],
[46629.3],
[46348.2],
[42835.9],
[42851.3],
[41746.1],
[42510.7],
[43145.5],
[42768.7],
[41292.7],
[41648. ],
[41695.4],
[41583.2],
[39556.4],
[39888.8],
[40086. ],
[39935.7],
[41811.3],
[42120.9],
[42030.7]])
X_test_price28
array([[44183.4, 44943.7, 42836.1, 44157. , 44156.9, 43967.9, 43927.3,
46962.2, 46129. , 46629.3, 46348.2, 42835.9, 42851.3, 41746.1,
42510.7, 43145.5, 42768.7, 41292.7, 41648. , 41695.4, 41583.2,
39556.4, 39888.8, 40086. , 39935.7, 41811.3, 42120.9, 42030.7],
[44943.7, 42836.1, 44157. , 44156.9, 43967.9, 43927.3, 46962.2,
46129. , 46629.3, 46348.2, 42835.9, 42851.3, 41746.1, 42510.7,
43145.5, 42768.7, 41292.7, 41648. , 41695.4, 41583.2, 39556.4,
39888.8, 40086. , 39935.7, 41811.3, 42120.9, 42030.7, 43299.8],
[42836.1, 44157. , 44156.9, 43967.9, 43927.3, 46962.2, 46129. ,
46629.3, 46348.2, 42835.9, 42851.3, 41746.1, 42510.7, 43145.5,
42768.7, 41292.7, 41648. , 41695.4, 41583.2, 39556.4, 39888.8,
40086. , 39935.7, 41811.3, 42120.9, 42030.7, 43299.8, 42946.2],
[44157. , 44156.9, 43967.9, 43927.3, 46962.2, 46129. , 46629.3,
46348.2, 42835.9, 42851.3, 41746.1, 42510.7, 43145.5, 42768.7,
41292.7, 41648. , 41695.4, 41583.2, 39556.4, 39888.8, 40086. ,
39935.7, 41811.3, 42120.9, 42030.7, 43299.8, 42946.2, 42580.5],
[44156.9, 43967.9, 43927.3, 46962.2, 46129. , 46629.3, 46348.2,
42835.9, 42851.3, 41746.1, 42510.7, 43145.5, 42768.7, 41292.7,
41648. , 41695.4, 41583.2, 39556.4, 39888.8, 40086. , 39935.7,
41811.3, 42120.9, 42030.7, 43299.8, 42946.2, 42580.5, 43081.4],
[43967.9, 43927.3, 46962.2, 46129. , 46629.3, 46348.2, 42835.9,
42851.3, 41746.1, 42510.7, 43145.5, 42768.7, 41292.7, 41648. ,
41695.4, 41583.2, 39556.4, 39888.8, 40086. , 39935.7, 41811.3,
42120.9, 42030.7, 43299.8, 42946.2, 42580.5, 43081.4, 43194.7],
[43927.3, 46962.2, 46129. , 46629.3, 46348.2, 42835.9, 42851.3,
41746.1, 42510.7, 43145.5, 42768.7, 41292.7, 41648. , 41695.4,
41583.2, 39556.4, 39888.8, 40086. , 39935.7, 41811.3, 42120.9,
42030.7, 43299.8, 42946.2, 42580.5, 43081.4, 43194.7, 43005.7],
[46962.2, 46129. , 46629.3, 46348.2, 42835.9, 42851.3, 41746.1,
42510.7, 43145.5, 42768.7, 41292.7, 41648. , 41695.4, 41583.2,
39556.4, 39888.8, 40086. , 39935.7, 41811.3, 42120.9, 42030.7,
43299.8, 42946.2, 42580.5, 43081.4, 43194.7, 43005.7, 42581.4],
[46129. , 46629.3, 46348.2, 42835.9, 42851.3, 41746.1, 42510.7,
43145.5, 42768.7, 41292.7, 41648. , 41695.4, 41583.2, 39556.4,
39888.8, 40086. , 39935.7, 41811.3, 42120.9, 42030.7, 43299.8,
42946.2, 42580.5, 43081.4, 43194.7, 43005.7, 42581.4, 42697.2],
[46629.3, 46348.2, 42835.9, 42851.3, 41746.1, 42510.7, 43145.5,
42768.7, 41292.7, 41648. , 41695.4, 41583.2, 39556.4, 39888.8,
40086. , 39935.7, 41811.3, 42120.9, 42030.7, 43299.8, 42946.2,
42580.5, 43081.4, 43194.7, 43005.7, 42581.4, 42697.2, 43087.7],
[46348.2, 42835.9, 42851.3, 41746.1, 42510.7, 43145.5, 42768.7,
41292.7, 41648. , 41695.4, 41583.2, 39556.4, 39888.8, 40086. ,
39935.7, 41811.3, 42120.9, 42030.7, 43299.8, 42946.2, 42580.5,
43081.4, 43194.7, 43005.7, 42581.4, 42697.2, 43087.7, 44339.8],
[42835.9, 42851.3, 41746.1, 42510.7, 43145.5, 42768.7, 41292.7,
41648. , 41695.4, 41583.2, 39556.4, 39888.8, 40086. , 39935.7,
41811.3, 42120.9, 42030.7, 43299.8, 42946.2, 42580.5, 43081.4,
43194.7, 43005.7, 42581.4, 42697.2, 43087.7, 44339.8, 45293.3],
[42851.3, 41746.1, 42510.7, 43145.5, 42768.7, 41292.7, 41648. ,
41695.4, 41583.2, 39556.4, 39888.8, 40086. , 39935.7, 41811.3,
42120.9, 42030.7, 43299.8, 42946.2, 42580.5, 43081.4, 43194.7,
43005.7, 42581.4, 42697.2, 43087.7, 44339.8, 45293.3, 47127.5],
[41746.1, 42510.7, 43145.5, 42768.7, 41292.7, 41648. , 41695.4,
41583.2, 39556.4, 39888.8, 40086. , 39935.7, 41811.3, 42120.9,
42030.7, 43299.8, 42946.2, 42580.5, 43081.4, 43194.7, 43005.7,
42581.4, 42697.2, 43087.7, 44339.8, 45293.3, 47127.5, 47758.2],
[42510.7, 43145.5, 42768.7, 41292.7, 41648. , 41695.4, 41583.2,
39556.4, 39888.8, 40086. , 39935.7, 41811.3, 42120.9, 42030.7,
43299.8, 42946.2, 42580.5, 43081.4, 43194.7, 43005.7, 42581.4,
42697.2, 43087.7, 44339.8, 45293.3, 47127.5, 47758.2, 48277.3],
[43145.5, 42768.7, 41292.7, 41648. , 41695.4, 41583.2, 39556.4,
39888.8, 40086. , 39935.7, 41811.3, 42120.9, 42030.7, 43299.8,
42946.2, 42580.5, 43081.4, 43194.7, 43005.7, 42581.4, 42697.2,
43087.7, 44339.8, 45293.3, 47127.5, 47758.2, 48277.3, 49941.3],
[42768.7, 41292.7, 41648. , 41695.4, 41583.2, 39556.4, 39888.8,
40086. , 39935.7, 41811.3, 42120.9, 42030.7, 43299.8, 42946.2,
42580.5, 43081.4, 43194.7, 43005.7, 42581.4, 42697.2, 43087.7,
44339.8, 45293.3, 47127.5, 47758.2, 48277.3, 49941.3, 49716. ],
[41292.7, 41648. , 41695.4, 41583.2, 39556.4, 39888.8, 40086. ,
39935.7, 41811.3, 42120.9, 42030.7, 43299.8, 42946.2, 42580.5,
43081.4, 43194.7, 43005.7, 42581.4, 42697.2, 43087.7, 44339.8,
45293.3, 47127.5, 47758.2, 48277.3, 49941.3, 49716. , 51782.4],
[41648. , 41695.4, 41583.2, 39556.4, 39888.8, 40086. , 39935.7,
41811.3, 42120.9, 42030.7, 43299.8, 42946.2, 42580.5, 43081.4,
43194.7, 43005.7, 42581.4, 42697.2, 43087.7, 44339.8, 45293.3,
47127.5, 47758.2, 48277.3, 49941.3, 49716. , 51782.4, 51901.3],
[41695.4, 41583.2, 39556.4, 39888.8, 40086. , 39935.7, 41811.3,
42120.9, 42030.7, 43299.8, 42946.2, 42580.5, 43081.4, 43194.7,
43005.7, 42581.4, 42697.2, 43087.7, 44339.8, 45293.3, 47127.5,
47758.2, 48277.3, 49941.3, 49716. , 51782.4, 51901.3, 52134.2],
[41583.2, 39556.4, 39888.8, 40086. , 39935.7, 41811.3, 42120.9,
42030.7, 43299.8, 42946.2, 42580.5, 43081.4, 43194.7, 43005.7,
42581.4, 42697.2, 43087.7, 44339.8, 45293.3, 47127.5, 47758.2,
48277.3, 49941.3, 49716. , 51782.4, 51901.3, 52134.2, 51646. ],
[39556.4, 39888.8, 40086. , 39935.7, 41811.3, 42120.9, 42030.7,
43299.8, 42946.2, 42580.5, 43081.4, 43194.7, 43005.7, 42581.4,
42697.2, 43087.7, 44339.8, 45293.3, 47127.5, 47758.2, 48277.3,
49941.3, 49716. , 51782.4, 51901.3, 52134.2, 51646. , 52117.5],
[39888.8, 40086. , 39935.7, 41811.3, 42120.9, 42030.7, 43299.8,
42946.2, 42580.5, 43081.4, 43194.7, 43005.7, 42581.4, 42697.2,
43087.7, 44339.8, 45293.3, 47127.5, 47758.2, 48277.3, 49941.3,
49716. , 51782.4, 51901.3, 52134.2, 51646. , 52117.5, 51783.6],
[40086. , 39935.7, 41811.3, 42120.9, 42030.7, 43299.8, 42946.2,
42580.5, 43081.4, 43194.7, 43005.7, 42581.4, 42697.2, 43087.7,
44339.8, 45293.3, 47127.5, 47758.2, 48277.3, 49941.3, 49716. ,
51782.4, 51901.3, 52134.2, 51646. , 52117.5, 51783.6, 52263.5],
[39935.7, 41811.3, 42120.9, 42030.7, 43299.8, 42946.2, 42580.5,
43081.4, 43194.7, 43005.7, 42581.4, 42697.2, 43087.7, 44339.8,
45293.3, 47127.5, 47758.2, 48277.3, 49941.3, 49716. , 51782.4,
51901.3, 52134.2, 51646. , 52117.5, 51783.6, 52263.5, 51858.2],
[41811.3, 42120.9, 42030.7, 43299.8, 42946.2, 42580.5, 43081.4,
43194.7, 43005.7, 42581.4, 42697.2, 43087.7, 44339.8, 45293.3,
47127.5, 47758.2, 48277.3, 49941.3, 49716. , 51782.4, 51901.3,
52134.2, 51646. , 52117.5, 51783.6, 52263.5, 51858.2, 51320.4],
[42120.9, 42030.7, 43299.8, 42946.2, 42580.5, 43081.4, 43194.7,
43005.7, 42581.4, 42697.2, 43087.7, 44339.8, 45293.3, 47127.5,
47758.2, 48277.3, 49941.3, 49716. , 51782.4, 51901.3, 52134.2,
51646. , 52117.5, 51783.6, 52263.5, 51858.2, 51320.4, 50740.5],
[42030.7, 43299.8, 42946.2, 42580.5, 43081.4, 43194.7, 43005.7,
42581.4, 42697.2, 43087.7, 44339.8, 45293.3, 47127.5, 47758.2,
48277.3, 49941.3, 49716. , 51782.4, 51901.3, 52134.2, 51646. ,
52117.5, 51783.6, 52263.5, 51858.2, 51320.4, 50740.5, 51571.6]])
y_test_price28
array([[43299.8],
[42946.2],
[42580.5],
[43081.4],
[43194.7],
[43005.7],
[42581.4],
[42697.2],
[43087.7],
[44339.8],
[45293.3],
[47127.5],
[47758.2],
[48277.3],
[49941.3],
[49716. ],
[51782.4],
[51901.3],
[52134.2],
[51646. ],
[52117.5],
[51783.6],
[52263.5],
[51858.2],
[51320.4],
[50740.5],
[51571.6],
[51722.7]])
Retorno Acumulado (A_t)#
Horizonte de 7 días (\(\tau=7\))
# Build the train / validation / test windows for the cumulative-return
# series 'A_t' using the 7-day horizon (tau7 defined earlier).
X_train_at7, y_train_at7, X_val_at7, y_val_at7, X_test_at7, y_test_at7 = create_time_series_datasets(df_1_st, 'A_t', tau7)
# Report the dimensions of every generated split.
for _label, _arr in (("X_train", X_train_at7), ("y_train", y_train_at7),
                     ("X_val", X_val_at7), ("y_val", y_val_at7),
                     ("X_test", X_test_at7), ("y_test", y_test_at7)):
    print(f"{_label} shape:", _arr.shape)
X_train shape: (4971, 7)
y_train shape: (4971, 1)
X_val shape: (7, 7)
y_val shape: (7, 1)
X_test shape: (7, 7)
y_test shape: (7, 1)
A continuación se puede visualizar el arreglo para cada una de las matrices generadas para los datos de entrenamiento, validación y prueba para un \(\tau = 7\).
X_train_at7
array([[ 0. , 0. , 0. , ..., 0. ,
0. , 0. ],
[ 0. , 0. , 0. , ..., 0. ,
0. , 0. ],
[ 0. , 0. , 0. , ..., 0. ,
0. , 0. ],
...,
[23.40281077, 23.41919019, 23.4221201 , ..., 23.52271821,
23.61756154, 23.59677797],
[23.41919019, 23.4221201 , 23.47572133, ..., 23.61756154,
23.59677797, 23.61685994],
[23.4221201 , 23.47572133, 23.52271821, ..., 23.59677797,
23.61685994, 23.61039816]])
y_train_at7
array([[ 0. ],
[ 0. ],
[ 0. ],
...,
[23.61685994],
[23.61039816],
[23.62880786]])
X_val_at7
array([[23.47572133, 23.52271821, 23.61756154, 23.59677797, 23.61685994,
23.61039816, 23.62880786],
[23.52271821, 23.61756154, 23.59677797, 23.61685994, 23.61039816,
23.62880786, 23.71012939],
[23.61756154, 23.59677797, 23.61685994, 23.61039816, 23.62880786,
23.71012939, 23.64454431],
[23.59677797, 23.61685994, 23.61039816, 23.62880786, 23.71012939,
23.64454431, 23.6804074 ],
[23.61685994, 23.61039816, 23.62880786, 23.71012939, 23.64454431,
23.6804074 , 23.69213403],
[23.61039816, 23.62880786, 23.71012939, 23.64454431, 23.6804074 ,
23.69213403, 23.7118288 ],
[23.62880786, 23.71012939, 23.64454431, 23.6804074 , 23.69213403,
23.7118288 , 23.71468188]])
y_val_at7
array([[23.71012939],
[23.64454431],
[23.6804074 ],
[23.69213403],
[23.7118288 ],
[23.71468188],
[23.72343324]])
X_test_at7
array([[23.71012939, 23.64454431, 23.6804074 , 23.69213403, 23.7118288 ,
23.71468188, 23.72343324],
[23.64454431, 23.6804074 , 23.69213403, 23.7118288 , 23.71468188,
23.72343324, 23.76888106],
[23.6804074 , 23.69213403, 23.7118288 , 23.71468188, 23.72343324,
23.76888106, 23.76015835],
[23.69213403, 23.7118288 , 23.71468188, 23.72343324, 23.76888106,
23.76015835, 23.78249073],
[23.7118288 , 23.71468188, 23.72343324, 23.76888106, 23.76015835,
23.78249073, 23.75951434],
[23.71468188, 23.72343324, 23.76888106, 23.76015835, 23.78249073,
23.75951434, 23.73256564],
[23.72343324, 23.76888106, 23.76015835, 23.78249073, 23.75951434,
23.73256564, 23.67282941]])
y_test_at7
array([[23.76888106],
[23.76015835],
[23.78249073],
[23.75951434],
[23.73256564],
[23.67282941],
[23.71994014]])
Horizonte de 14 días (\(\tau=14\))
# Build the train / validation / test windows for the cumulative-return
# series 'A_t' using the 14-day horizon (tau14 defined earlier).
X_train_at14, y_train_at14, X_val_at14, y_val_at14, X_test_at14, y_test_at14 = create_time_series_datasets(df_1_st, 'A_t', tau14)
# Report the dimensions of every generated split.
for _label, _arr in (("X_train", X_train_at14), ("y_train", y_train_at14),
                     ("X_val", X_val_at14), ("y_val", y_val_at14),
                     ("X_test", X_test_at14), ("y_test", y_test_at14)):
    print(f"{_label} shape:", _arr.shape)
X_train shape: (4943, 14)
y_train shape: (4943, 1)
X_val shape: (14, 14)
y_val shape: (14, 1)
X_test shape: (14, 14)
y_test shape: (14, 1)
A continuación se puede visualizar el arreglo para cada una de las matrices generadas para los datos de entrenamiento, validación y prueba para un \(\tau = 14\).
X_train_at14
array([[ 0. , 0. , 0. , ..., 0. ,
0. , 0. ],
[ 0. , 0. , 0. , ..., 0. ,
0. , 0. ],
[ 0. , 0. , 0. , ..., 0. ,
0. , 0. ],
...,
[23.20520085, 23.21260555, 23.21046409, ..., 23.2359942 ,
23.26505353, 23.28655792],
[23.21260555, 23.21046409, 23.24065869, ..., 23.26505353,
23.28655792, 23.32705397],
[23.21046409, 23.24065869, 23.23249237, ..., 23.28655792,
23.32705397, 23.34043682]])
y_train_at14
array([[ 0. ],
[ 0. ],
[ 0. ],
...,
[23.32705397],
[23.34043682],
[23.35130615]])
X_val_at14
array([[23.24065869, 23.23249237, 23.22397707, 23.23574067, 23.23837057,
23.23399503, 23.2241289 , 23.2268484 , 23.2359942 , 23.26505353,
23.28655792, 23.32705397, 23.34043682, 23.35130615],
[23.23249237, 23.22397707, 23.23574067, 23.23837057, 23.23399503,
23.2241289 , 23.2268484 , 23.2359942 , 23.26505353, 23.28655792,
23.32705397, 23.34043682, 23.35130615, 23.3857737 ],
[23.22397707, 23.23574067, 23.23837057, 23.23399503, 23.2241289 ,
23.2268484 , 23.2359942 , 23.26505353, 23.28655792, 23.32705397,
23.34043682, 23.35130615, 23.3857737 , 23.3812624 ],
[23.23574067, 23.23837057, 23.23399503, 23.2241289 , 23.2268484 ,
23.2359942 , 23.26505353, 23.28655792, 23.32705397, 23.34043682,
23.35130615, 23.3857737 , 23.3812624 , 23.42282649],
[23.23837057, 23.23399503, 23.2241289 , 23.2268484 , 23.2359942 ,
23.26505353, 23.28655792, 23.32705397, 23.34043682, 23.35130615,
23.3857737 , 23.3812624 , 23.42282649, 23.42512263],
[23.23399503, 23.2241289 , 23.2268484 , 23.2359942 , 23.26505353,
23.28655792, 23.32705397, 23.34043682, 23.35130615, 23.3857737 ,
23.3812624 , 23.42282649, 23.42512263, 23.42961 ],
[23.2241289 , 23.2268484 , 23.2359942 , 23.26505353, 23.28655792,
23.32705397, 23.34043682, 23.35130615, 23.3857737 , 23.3812624 ,
23.42282649, 23.42512263, 23.42961 , 23.4202457 ],
[23.2268484 , 23.2359942 , 23.26505353, 23.28655792, 23.32705397,
23.34043682, 23.35130615, 23.3857737 , 23.3812624 , 23.42282649,
23.42512263, 23.42961 , 23.4202457 , 23.42937516],
[23.2359942 , 23.26505353, 23.28655792, 23.32705397, 23.34043682,
23.35130615, 23.3857737 , 23.3812624 , 23.42282649, 23.42512263,
23.42961 , 23.4202457 , 23.42937516, 23.42296848],
[23.26505353, 23.28655792, 23.32705397, 23.34043682, 23.35130615,
23.3857737 , 23.3812624 , 23.42282649, 23.42512263, 23.42961 ,
23.4202457 , 23.42937516, 23.42296848, 23.4322359 ],
[23.28655792, 23.32705397, 23.34043682, 23.35130615, 23.3857737 ,
23.3812624 , 23.42282649, 23.42512263, 23.42961 , 23.4202457 ,
23.42937516, 23.42296848, 23.4322359 , 23.42448096],
[23.32705397, 23.34043682, 23.35130615, 23.3857737 , 23.3812624 ,
23.42282649, 23.42512263, 23.42961 , 23.4202457 , 23.42937516,
23.42296848, 23.4322359 , 23.42448096, 23.41411037],
[23.34043682, 23.35130615, 23.3857737 , 23.3812624 , 23.42282649,
23.42512263, 23.42961 , 23.4202457 , 23.42937516, 23.42296848,
23.4322359 , 23.42448096, 23.41411037, 23.40281077],
[23.35130615, 23.3857737 , 23.3812624 , 23.42282649, 23.42512263,
23.42961 , 23.4202457 , 23.42937516, 23.42296848, 23.4322359 ,
23.42448096, 23.41411037, 23.40281077, 23.41919019]])
y_val_at14
array([[23.3857737 ],
[23.3812624 ],
[23.42282649],
[23.42512263],
[23.42961 ],
[23.4202457 ],
[23.42937516],
[23.42296848],
[23.4322359 ],
[23.42448096],
[23.41411037],
[23.40281077],
[23.41919019],
[23.4221201 ]])
X_test_at14
array([[23.3857737 , 23.3812624 , 23.42282649, 23.42512263, 23.42961 ,
23.4202457 , 23.42937516, 23.42296848, 23.4322359 , 23.42448096,
23.41411037, 23.40281077, 23.41919019, 23.4221201 ],
[23.3812624 , 23.42282649, 23.42512263, 23.42961 , 23.4202457 ,
23.42937516, 23.42296848, 23.4322359 , 23.42448096, 23.41411037,
23.40281077, 23.41919019, 23.4221201 , 23.47572133],
[23.42282649, 23.42512263, 23.42961 , 23.4202457 , 23.42937516,
23.42296848, 23.4322359 , 23.42448096, 23.41411037, 23.40281077,
23.41919019, 23.4221201 , 23.47572133, 23.52271821],
[23.42512263, 23.42961 , 23.4202457 , 23.42937516, 23.42296848,
23.4322359 , 23.42448096, 23.41411037, 23.40281077, 23.41919019,
23.4221201 , 23.47572133, 23.52271821, 23.61756154],
[23.42961 , 23.4202457 , 23.42937516, 23.42296848, 23.4322359 ,
23.42448096, 23.41411037, 23.40281077, 23.41919019, 23.4221201 ,
23.47572133, 23.52271821, 23.61756154, 23.59677797],
[23.4202457 , 23.42937516, 23.42296848, 23.4322359 , 23.42448096,
23.41411037, 23.40281077, 23.41919019, 23.4221201 , 23.47572133,
23.52271821, 23.61756154, 23.59677797, 23.61685994],
[23.42937516, 23.42296848, 23.4322359 , 23.42448096, 23.41411037,
23.40281077, 23.41919019, 23.4221201 , 23.47572133, 23.52271821,
23.61756154, 23.59677797, 23.61685994, 23.61039816],
[23.42296848, 23.4322359 , 23.42448096, 23.41411037, 23.40281077,
23.41919019, 23.4221201 , 23.47572133, 23.52271821, 23.61756154,
23.59677797, 23.61685994, 23.61039816, 23.62880786],
[23.4322359 , 23.42448096, 23.41411037, 23.40281077, 23.41919019,
23.4221201 , 23.47572133, 23.52271821, 23.61756154, 23.59677797,
23.61685994, 23.61039816, 23.62880786, 23.71012939],
[23.42448096, 23.41411037, 23.40281077, 23.41919019, 23.4221201 ,
23.47572133, 23.52271821, 23.61756154, 23.59677797, 23.61685994,
23.61039816, 23.62880786, 23.71012939, 23.64454431],
[23.41411037, 23.40281077, 23.41919019, 23.4221201 , 23.47572133,
23.52271821, 23.61756154, 23.59677797, 23.61685994, 23.61039816,
23.62880786, 23.71012939, 23.64454431, 23.6804074 ],
[23.40281077, 23.41919019, 23.4221201 , 23.47572133, 23.52271821,
23.61756154, 23.59677797, 23.61685994, 23.61039816, 23.62880786,
23.71012939, 23.64454431, 23.6804074 , 23.69213403],
[23.41919019, 23.4221201 , 23.47572133, 23.52271821, 23.61756154,
23.59677797, 23.61685994, 23.61039816, 23.62880786, 23.71012939,
23.64454431, 23.6804074 , 23.69213403, 23.7118288 ],
[23.4221201 , 23.47572133, 23.52271821, 23.61756154, 23.59677797,
23.61685994, 23.61039816, 23.62880786, 23.71012939, 23.64454431,
23.6804074 , 23.69213403, 23.7118288 , 23.71468188]])
y_test_at14
array([[23.47572133],
[23.52271821],
[23.61756154],
[23.59677797],
[23.61685994],
[23.61039816],
[23.62880786],
[23.71012939],
[23.64454431],
[23.6804074 ],
[23.69213403],
[23.7118288 ],
[23.71468188],
[23.72343324]])
Horizonte de 21 días (\(\tau=21\))
# Build the train / validation / test windows for the cumulative-return
# series 'A_t' using the 21-day horizon (tau21 defined earlier).
X_train_at21, y_train_at21, X_val_at21, y_val_at21, X_test_at21, y_test_at21 = create_time_series_datasets(df_1_st, 'A_t', tau21)
# Report the dimensions of every generated split.
for _label, _arr in (("X_train", X_train_at21), ("y_train", y_train_at21),
                     ("X_val", X_val_at21), ("y_val", y_val_at21),
                     ("X_test", X_test_at21), ("y_test", y_test_at21)):
    print(f"{_label} shape:", _arr.shape)
X_train shape: (4915, 21)
y_train shape: (4915, 1)
X_val shape: (21, 21)
y_val shape: (21, 1)
X_test shape: (21, 21)
y_test shape: (21, 1)
A continuación se puede visualizar el arreglo para cada una de las matrices generadas para los datos de entrenamiento, validación y prueba para un \(\tau = 21\).
X_train_at21
array([[ 0. , 0. , 0. , ..., 0. ,
0. , 0. ],
[ 0. , 0. , 0. , ..., 0. ,
0. , 0. ],
[ 0. , 0. , 0. , ..., 0. ,
0. , 0. ],
...,
[23.19947223, 23.20100054, 23.20422339, ..., 23.23357156,
23.22483832, 23.1903271 ],
[23.20100054, 23.20422339, 23.24942772, ..., 23.22483832,
23.1903271 , 23.19893153],
[23.20422339, 23.24942772, 23.26663554, ..., 23.1903271 ,
23.19893153, 23.20006964]])
y_train_at21
array([[ 0. ],
[ 0. ],
[ 0. ],
...,
[23.19893153],
[23.20006964],
[23.1973787 ]])
X_val_at21
array([[23.24942772, 23.26663554, 23.21974131, 23.25057745, 23.25057519,
23.246295 , 23.2453716 , 23.31446075, 23.29671882, 23.30756449,
23.30153609, 23.22575537, 23.22611489, 23.20032337, 23.21863885,
23.23357156, 23.22483832, 23.1903271 , 23.19893153, 23.20006964,
23.1973787 ],
[23.26663554, 23.21974131, 23.25057745, 23.25057519, 23.246295 ,
23.2453716 , 23.31446075, 23.29671882, 23.30756449, 23.30153609,
23.22575537, 23.22611489, 23.20032337, 23.21863885, 23.23357156,
23.22483832, 23.1903271 , 23.19893153, 23.20006964, 23.1973787 ,
23.14863786],
[23.21974131, 23.25057745, 23.25057519, 23.246295 , 23.2453716 ,
23.31446075, 23.29671882, 23.30756449, 23.30153609, 23.22575537,
23.22611489, 23.20032337, 23.21863885, 23.23357156, 23.22483832,
23.1903271 , 23.19893153, 23.20006964, 23.1973787 , 23.14863786,
23.15704105],
[23.25057745, 23.25057519, 23.246295 , 23.2453716 , 23.31446075,
23.29671882, 23.30756449, 23.30153609, 23.22575537, 23.22611489,
23.20032337, 23.21863885, 23.23357156, 23.22483832, 23.1903271 ,
23.19893153, 23.20006964, 23.1973787 , 23.14863786, 23.15704105,
23.16198479],
[23.25057519, 23.246295 , 23.2453716 , 23.31446075, 23.29671882,
23.30756449, 23.30153609, 23.22575537, 23.22611489, 23.20032337,
23.21863885, 23.23357156, 23.22483832, 23.1903271 , 23.19893153,
23.20006964, 23.1973787 , 23.14863786, 23.15704105, 23.16198479,
23.15823535],
[23.246295 , 23.2453716 , 23.31446075, 23.29671882, 23.30756449,
23.30153609, 23.22575537, 23.22611489, 23.20032337, 23.21863885,
23.23357156, 23.22483832, 23.1903271 , 23.19893153, 23.20006964,
23.1973787 , 23.14863786, 23.15704105, 23.16198479, 23.15823535,
23.20520085],
[23.2453716 , 23.31446075, 23.29671882, 23.30756449, 23.30153609,
23.22575537, 23.22611489, 23.20032337, 23.21863885, 23.23357156,
23.22483832, 23.1903271 , 23.19893153, 23.20006964, 23.1973787 ,
23.14863786, 23.15704105, 23.16198479, 23.15823535, 23.20520085,
23.21260555],
[23.31446075, 23.29671882, 23.30756449, 23.30153609, 23.22575537,
23.22611489, 23.20032337, 23.21863885, 23.23357156, 23.22483832,
23.1903271 , 23.19893153, 23.20006964, 23.1973787 , 23.14863786,
23.15704105, 23.16198479, 23.15823535, 23.20520085, 23.21260555,
23.21046409],
[23.29671882, 23.30756449, 23.30153609, 23.22575537, 23.22611489,
23.20032337, 23.21863885, 23.23357156, 23.22483832, 23.1903271 ,
23.19893153, 23.20006964, 23.1973787 , 23.14863786, 23.15704105,
23.16198479, 23.15823535, 23.20520085, 23.21260555, 23.21046409,
23.24065869],
[23.30756449, 23.30153609, 23.22575537, 23.22611489, 23.20032337,
23.21863885, 23.23357156, 23.22483832, 23.1903271 , 23.19893153,
23.20006964, 23.1973787 , 23.14863786, 23.15704105, 23.16198479,
23.15823535, 23.20520085, 23.21260555, 23.21046409, 23.24065869,
23.23249237],
[23.30153609, 23.22575537, 23.22611489, 23.20032337, 23.21863885,
23.23357156, 23.22483832, 23.1903271 , 23.19893153, 23.20006964,
23.1973787 , 23.14863786, 23.15704105, 23.16198479, 23.15823535,
23.20520085, 23.21260555, 23.21046409, 23.24065869, 23.23249237,
23.22397707],
[23.22575537, 23.22611489, 23.20032337, 23.21863885, 23.23357156,
23.22483832, 23.1903271 , 23.19893153, 23.20006964, 23.1973787 ,
23.14863786, 23.15704105, 23.16198479, 23.15823535, 23.20520085,
23.21260555, 23.21046409, 23.24065869, 23.23249237, 23.22397707,
23.23574067],
[23.22611489, 23.20032337, 23.21863885, 23.23357156, 23.22483832,
23.1903271 , 23.19893153, 23.20006964, 23.1973787 , 23.14863786,
23.15704105, 23.16198479, 23.15823535, 23.20520085, 23.21260555,
23.21046409, 23.24065869, 23.23249237, 23.22397707, 23.23574067,
23.23837057],
[23.20032337, 23.21863885, 23.23357156, 23.22483832, 23.1903271 ,
23.19893153, 23.20006964, 23.1973787 , 23.14863786, 23.15704105,
23.16198479, 23.15823535, 23.20520085, 23.21260555, 23.21046409,
23.24065869, 23.23249237, 23.22397707, 23.23574067, 23.23837057,
23.23399503],
[23.21863885, 23.23357156, 23.22483832, 23.1903271 , 23.19893153,
23.20006964, 23.1973787 , 23.14863786, 23.15704105, 23.16198479,
23.15823535, 23.20520085, 23.21260555, 23.21046409, 23.24065869,
23.23249237, 23.22397707, 23.23574067, 23.23837057, 23.23399503,
23.2241289 ],
[23.23357156, 23.22483832, 23.1903271 , 23.19893153, 23.20006964,
23.1973787 , 23.14863786, 23.15704105, 23.16198479, 23.15823535,
23.20520085, 23.21260555, 23.21046409, 23.24065869, 23.23249237,
23.22397707, 23.23574067, 23.23837057, 23.23399503, 23.2241289 ,
23.2268484 ],
[23.22483832, 23.1903271 , 23.19893153, 23.20006964, 23.1973787 ,
23.14863786, 23.15704105, 23.16198479, 23.15823535, 23.20520085,
23.21260555, 23.21046409, 23.24065869, 23.23249237, 23.22397707,
23.23574067, 23.23837057, 23.23399503, 23.2241289 , 23.2268484 ,
23.2359942 ],
[23.1903271 , 23.19893153, 23.20006964, 23.1973787 , 23.14863786,
23.15704105, 23.16198479, 23.15823535, 23.20520085, 23.21260555,
23.21046409, 23.24065869, 23.23249237, 23.22397707, 23.23574067,
23.23837057, 23.23399503, 23.2241289 , 23.2268484 , 23.2359942 ,
23.26505353],
[23.19893153, 23.20006964, 23.1973787 , 23.14863786, 23.15704105,
23.16198479, 23.15823535, 23.20520085, 23.21260555, 23.21046409,
23.24065869, 23.23249237, 23.22397707, 23.23574067, 23.23837057,
23.23399503, 23.2241289 , 23.2268484 , 23.2359942 , 23.26505353,
23.28655792],
[23.20006964, 23.1973787 , 23.14863786, 23.15704105, 23.16198479,
23.15823535, 23.20520085, 23.21260555, 23.21046409, 23.24065869,
23.23249237, 23.22397707, 23.23574067, 23.23837057, 23.23399503,
23.2241289 , 23.2268484 , 23.2359942 , 23.26505353, 23.28655792,
23.32705397],
[23.1973787 , 23.14863786, 23.15704105, 23.16198479, 23.15823535,
23.20520085, 23.21260555, 23.21046409, 23.24065869, 23.23249237,
23.22397707, 23.23574067, 23.23837057, 23.23399503, 23.2241289 ,
23.2268484 , 23.2359942 , 23.26505353, 23.28655792, 23.32705397,
23.34043682]])
y_val_at21
array([[23.14863786],
[23.15704105],
[23.16198479],
[23.15823535],
[23.20520085],
[23.21260555],
[23.21046409],
[23.24065869],
[23.23249237],
[23.22397707],
[23.23574067],
[23.23837057],
[23.23399503],
[23.2241289 ],
[23.2268484 ],
[23.2359942 ],
[23.26505353],
[23.28655792],
[23.32705397],
[23.34043682],
[23.35130615]])
X_test_at21
array([[23.14863786, 23.15704105, 23.16198479, 23.15823535, 23.20520085,
23.21260555, 23.21046409, 23.24065869, 23.23249237, 23.22397707,
23.23574067, 23.23837057, 23.23399503, 23.2241289 , 23.2268484 ,
23.2359942 , 23.26505353, 23.28655792, 23.32705397, 23.34043682,
23.35130615],
[23.15704105, 23.16198479, 23.15823535, 23.20520085, 23.21260555,
23.21046409, 23.24065869, 23.23249237, 23.22397707, 23.23574067,
23.23837057, 23.23399503, 23.2241289 , 23.2268484 , 23.2359942 ,
23.26505353, 23.28655792, 23.32705397, 23.34043682, 23.35130615,
23.3857737 ],
[23.16198479, 23.15823535, 23.20520085, 23.21260555, 23.21046409,
23.24065869, 23.23249237, 23.22397707, 23.23574067, 23.23837057,
23.23399503, 23.2241289 , 23.2268484 , 23.2359942 , 23.26505353,
23.28655792, 23.32705397, 23.34043682, 23.35130615, 23.3857737 ,
23.3812624 ],
[23.15823535, 23.20520085, 23.21260555, 23.21046409, 23.24065869,
23.23249237, 23.22397707, 23.23574067, 23.23837057, 23.23399503,
23.2241289 , 23.2268484 , 23.2359942 , 23.26505353, 23.28655792,
23.32705397, 23.34043682, 23.35130615, 23.3857737 , 23.3812624 ,
23.42282649],
[23.20520085, 23.21260555, 23.21046409, 23.24065869, 23.23249237,
23.22397707, 23.23574067, 23.23837057, 23.23399503, 23.2241289 ,
23.2268484 , 23.2359942 , 23.26505353, 23.28655792, 23.32705397,
23.34043682, 23.35130615, 23.3857737 , 23.3812624 , 23.42282649,
23.42512263],
[23.21260555, 23.21046409, 23.24065869, 23.23249237, 23.22397707,
23.23574067, 23.23837057, 23.23399503, 23.2241289 , 23.2268484 ,
23.2359942 , 23.26505353, 23.28655792, 23.32705397, 23.34043682,
23.35130615, 23.3857737 , 23.3812624 , 23.42282649, 23.42512263,
23.42961 ],
[23.21046409, 23.24065869, 23.23249237, 23.22397707, 23.23574067,
23.23837057, 23.23399503, 23.2241289 , 23.2268484 , 23.2359942 ,
23.26505353, 23.28655792, 23.32705397, 23.34043682, 23.35130615,
23.3857737 , 23.3812624 , 23.42282649, 23.42512263, 23.42961 ,
23.4202457 ],
[23.24065869, 23.23249237, 23.22397707, 23.23574067, 23.23837057,
23.23399503, 23.2241289 , 23.2268484 , 23.2359942 , 23.26505353,
23.28655792, 23.32705397, 23.34043682, 23.35130615, 23.3857737 ,
23.3812624 , 23.42282649, 23.42512263, 23.42961 , 23.4202457 ,
23.42937516],
[23.23249237, 23.22397707, 23.23574067, 23.23837057, 23.23399503,
23.2241289 , 23.2268484 , 23.2359942 , 23.26505353, 23.28655792,
23.32705397, 23.34043682, 23.35130615, 23.3857737 , 23.3812624 ,
23.42282649, 23.42512263, 23.42961 , 23.4202457 , 23.42937516,
23.42296848],
[23.22397707, 23.23574067, 23.23837057, 23.23399503, 23.2241289 ,
23.2268484 , 23.2359942 , 23.26505353, 23.28655792, 23.32705397,
23.34043682, 23.35130615, 23.3857737 , 23.3812624 , 23.42282649,
23.42512263, 23.42961 , 23.4202457 , 23.42937516, 23.42296848,
23.4322359 ],
[23.23574067, 23.23837057, 23.23399503, 23.2241289 , 23.2268484 ,
23.2359942 , 23.26505353, 23.28655792, 23.32705397, 23.34043682,
23.35130615, 23.3857737 , 23.3812624 , 23.42282649, 23.42512263,
23.42961 , 23.4202457 , 23.42937516, 23.42296848, 23.4322359 ,
23.42448096],
[23.23837057, 23.23399503, 23.2241289 , 23.2268484 , 23.2359942 ,
23.26505353, 23.28655792, 23.32705397, 23.34043682, 23.35130615,
23.3857737 , 23.3812624 , 23.42282649, 23.42512263, 23.42961 ,
23.4202457 , 23.42937516, 23.42296848, 23.4322359 , 23.42448096,
23.41411037],
[23.23399503, 23.2241289 , 23.2268484 , 23.2359942 , 23.26505353,
23.28655792, 23.32705397, 23.34043682, 23.35130615, 23.3857737 ,
23.3812624 , 23.42282649, 23.42512263, 23.42961 , 23.4202457 ,
23.42937516, 23.42296848, 23.4322359 , 23.42448096, 23.41411037,
23.40281077],
[23.2241289 , 23.2268484 , 23.2359942 , 23.26505353, 23.28655792,
23.32705397, 23.34043682, 23.35130615, 23.3857737 , 23.3812624 ,
23.42282649, 23.42512263, 23.42961 , 23.4202457 , 23.42937516,
23.42296848, 23.4322359 , 23.42448096, 23.41411037, 23.40281077,
23.41919019],
[23.2268484 , 23.2359942 , 23.26505353, 23.28655792, 23.32705397,
23.34043682, 23.35130615, 23.3857737 , 23.3812624 , 23.42282649,
23.42512263, 23.42961 , 23.4202457 , 23.42937516, 23.42296848,
23.4322359 , 23.42448096, 23.41411037, 23.40281077, 23.41919019,
23.4221201 ],
[23.2359942 , 23.26505353, 23.28655792, 23.32705397, 23.34043682,
23.35130615, 23.3857737 , 23.3812624 , 23.42282649, 23.42512263,
23.42961 , 23.4202457 , 23.42937516, 23.42296848, 23.4322359 ,
23.42448096, 23.41411037, 23.40281077, 23.41919019, 23.4221201 ,
23.47572133],
[23.26505353, 23.28655792, 23.32705397, 23.34043682, 23.35130615,
23.3857737 , 23.3812624 , 23.42282649, 23.42512263, 23.42961 ,
23.4202457 , 23.42937516, 23.42296848, 23.4322359 , 23.42448096,
23.41411037, 23.40281077, 23.41919019, 23.4221201 , 23.47572133,
23.52271821],
[23.28655792, 23.32705397, 23.34043682, 23.35130615, 23.3857737 ,
23.3812624 , 23.42282649, 23.42512263, 23.42961 , 23.4202457 ,
23.42937516, 23.42296848, 23.4322359 , 23.42448096, 23.41411037,
23.40281077, 23.41919019, 23.4221201 , 23.47572133, 23.52271821,
23.61756154],
[23.32705397, 23.34043682, 23.35130615, 23.3857737 , 23.3812624 ,
23.42282649, 23.42512263, 23.42961 , 23.4202457 , 23.42937516,
23.42296848, 23.4322359 , 23.42448096, 23.41411037, 23.40281077,
23.41919019, 23.4221201 , 23.47572133, 23.52271821, 23.61756154,
23.59677797],
[23.34043682, 23.35130615, 23.3857737 , 23.3812624 , 23.42282649,
23.42512263, 23.42961 , 23.4202457 , 23.42937516, 23.42296848,
23.4322359 , 23.42448096, 23.41411037, 23.40281077, 23.41919019,
23.4221201 , 23.47572133, 23.52271821, 23.61756154, 23.59677797,
23.61685994],
[23.35130615, 23.3857737 , 23.3812624 , 23.42282649, 23.42512263,
23.42961 , 23.4202457 , 23.42937516, 23.42296848, 23.4322359 ,
23.42448096, 23.41411037, 23.40281077, 23.41919019, 23.4221201 ,
23.47572133, 23.52271821, 23.61756154, 23.59677797, 23.61685994,
23.61039816]])
y_test_at21
array([[23.3857737 ],
[23.3812624 ],
[23.42282649],
[23.42512263],
[23.42961 ],
[23.4202457 ],
[23.42937516],
[23.42296848],
[23.4322359 ],
[23.42448096],
[23.41411037],
[23.40281077],
[23.41919019],
[23.4221201 ],
[23.47572133],
[23.52271821],
[23.61756154],
[23.59677797],
[23.61685994],
[23.61039816],
[23.62880786]])
Horizonte de 28 días (\(\tau=28\))
# Build the train / validation / test splits of the 'A_t' series using a 28-day lag window.
X_train_at28, y_train_at28, X_val_at28, y_val_at28, X_test_at28, y_test_at28 = create_time_series_datasets(df_1_st, 'A_t', tau28)
# Report the dimensions of each split.
for _split_name, _split_arr in (("X_train", X_train_at28), ("y_train", y_train_at28),
                                ("X_val", X_val_at28), ("y_val", y_val_at28),
                                ("X_test", X_test_at28), ("y_test", y_test_at28)):
    print(f"{_split_name} shape:", _split_arr.shape)
X_train shape: (4887, 28)
y_train shape: (4887, 1)
X_val shape: (28, 28)
y_val shape: (28, 1)
X_test shape: (28, 28)
y_test shape: (28, 1)
A continuación se puede visualizar el arreglo para cada una de las matrices generadas para los datos de entrenamiento, validación y prueba para un \(\tau = 28\).
X_train_at28
array([[ 0. , 0. , 0. , ..., 0. ,
0. , 0. ],
[ 0. , 0. , 0. , ..., 0. ,
0. , 0. ],
[ 0. , 0. , 0. , ..., 0. ,
0. , 0. ],
...,
[23.10750215, 23.12741003, 23.14038065, ..., 23.20938681,
23.23133759, 23.21141884],
[23.12741003, 23.14038065, 23.19085825, ..., 23.23133759,
23.21141884, 23.19947223],
[23.14038065, 23.19085825, 23.24059651, ..., 23.21141884,
23.19947223, 23.20100054]])
y_train_at28
array([[ 0. ],
[ 0. ],
[ 0. ],
...,
[23.19947223],
[23.20100054],
[23.20422339]])
X_val_at28
array([[23.19085825, 23.24059651, 23.23379239, 23.22267678, 23.24313893,
23.23279156, 23.23445219, 23.17656587, 23.18216261, 23.21584786,
23.21914509, 23.19365115, 23.20182449, 23.18046268, 23.21166985,
23.20228394, 23.23549556, 23.24014712, 23.24249518, 23.23661603,
23.21994036, 23.23383006, 23.20938681, 23.23133759, 23.21141884,
23.19947223, 23.20100054, 23.20422339],
[23.24059651, 23.23379239, 23.22267678, 23.24313893, 23.23279156,
23.23445219, 23.17656587, 23.18216261, 23.21584786, 23.21914509,
23.19365115, 23.20182449, 23.18046268, 23.21166985, 23.20228394,
23.23549556, 23.24014712, 23.24249518, 23.23661603, 23.21994036,
23.23383006, 23.20938681, 23.23133759, 23.21141884, 23.19947223,
23.20100054, 23.20422339, 23.24942772],
[23.23379239, 23.22267678, 23.24313893, 23.23279156, 23.23445219,
23.17656587, 23.18216261, 23.21584786, 23.21914509, 23.19365115,
23.20182449, 23.18046268, 23.21166985, 23.20228394, 23.23549556,
23.24014712, 23.24249518, 23.23661603, 23.21994036, 23.23383006,
23.20938681, 23.23133759, 23.21141884, 23.19947223, 23.20100054,
23.20422339, 23.24942772, 23.26663554],
[23.22267678, 23.24313893, 23.23279156, 23.23445219, 23.17656587,
23.18216261, 23.21584786, 23.21914509, 23.19365115, 23.20182449,
23.18046268, 23.21166985, 23.20228394, 23.23549556, 23.24014712,
23.24249518, 23.23661603, 23.21994036, 23.23383006, 23.20938681,
23.23133759, 23.21141884, 23.19947223, 23.20100054, 23.20422339,
23.24942772, 23.26663554, 23.21974131],
[23.24313893, 23.23279156, 23.23445219, 23.17656587, 23.18216261,
23.21584786, 23.21914509, 23.19365115, 23.20182449, 23.18046268,
23.21166985, 23.20228394, 23.23549556, 23.24014712, 23.24249518,
23.23661603, 23.21994036, 23.23383006, 23.20938681, 23.23133759,
23.21141884, 23.19947223, 23.20100054, 23.20422339, 23.24942772,
23.26663554, 23.21974131, 23.25057745],
[23.23279156, 23.23445219, 23.17656587, 23.18216261, 23.21584786,
23.21914509, 23.19365115, 23.20182449, 23.18046268, 23.21166985,
23.20228394, 23.23549556, 23.24014712, 23.24249518, 23.23661603,
23.21994036, 23.23383006, 23.20938681, 23.23133759, 23.21141884,
23.19947223, 23.20100054, 23.20422339, 23.24942772, 23.26663554,
23.21974131, 23.25057745, 23.25057519],
[23.23445219, 23.17656587, 23.18216261, 23.21584786, 23.21914509,
23.19365115, 23.20182449, 23.18046268, 23.21166985, 23.20228394,
23.23549556, 23.24014712, 23.24249518, 23.23661603, 23.21994036,
23.23383006, 23.20938681, 23.23133759, 23.21141884, 23.19947223,
23.20100054, 23.20422339, 23.24942772, 23.26663554, 23.21974131,
23.25057745, 23.25057519, 23.246295 ],
[23.17656587, 23.18216261, 23.21584786, 23.21914509, 23.19365115,
23.20182449, 23.18046268, 23.21166985, 23.20228394, 23.23549556,
23.24014712, 23.24249518, 23.23661603, 23.21994036, 23.23383006,
23.20938681, 23.23133759, 23.21141884, 23.19947223, 23.20100054,
23.20422339, 23.24942772, 23.26663554, 23.21974131, 23.25057745,
23.25057519, 23.246295 , 23.2453716 ],
[23.18216261, 23.21584786, 23.21914509, 23.19365115, 23.20182449,
23.18046268, 23.21166985, 23.20228394, 23.23549556, 23.24014712,
23.24249518, 23.23661603, 23.21994036, 23.23383006, 23.20938681,
23.23133759, 23.21141884, 23.19947223, 23.20100054, 23.20422339,
23.24942772, 23.26663554, 23.21974131, 23.25057745, 23.25057519,
23.246295 , 23.2453716 , 23.31446075],
[23.21584786, 23.21914509, 23.19365115, 23.20182449, 23.18046268,
23.21166985, 23.20228394, 23.23549556, 23.24014712, 23.24249518,
23.23661603, 23.21994036, 23.23383006, 23.20938681, 23.23133759,
23.21141884, 23.19947223, 23.20100054, 23.20422339, 23.24942772,
23.26663554, 23.21974131, 23.25057745, 23.25057519, 23.246295 ,
23.2453716 , 23.31446075, 23.29671882],
[23.21914509, 23.19365115, 23.20182449, 23.18046268, 23.21166985,
23.20228394, 23.23549556, 23.24014712, 23.24249518, 23.23661603,
23.21994036, 23.23383006, 23.20938681, 23.23133759, 23.21141884,
23.19947223, 23.20100054, 23.20422339, 23.24942772, 23.26663554,
23.21974131, 23.25057745, 23.25057519, 23.246295 , 23.2453716 ,
23.31446075, 23.29671882, 23.30756449],
[23.19365115, 23.20182449, 23.18046268, 23.21166985, 23.20228394,
23.23549556, 23.24014712, 23.24249518, 23.23661603, 23.21994036,
23.23383006, 23.20938681, 23.23133759, 23.21141884, 23.19947223,
23.20100054, 23.20422339, 23.24942772, 23.26663554, 23.21974131,
23.25057745, 23.25057519, 23.246295 , 23.2453716 , 23.31446075,
23.29671882, 23.30756449, 23.30153609],
[23.20182449, 23.18046268, 23.21166985, 23.20228394, 23.23549556,
23.24014712, 23.24249518, 23.23661603, 23.21994036, 23.23383006,
23.20938681, 23.23133759, 23.21141884, 23.19947223, 23.20100054,
23.20422339, 23.24942772, 23.26663554, 23.21974131, 23.25057745,
23.25057519, 23.246295 , 23.2453716 , 23.31446075, 23.29671882,
23.30756449, 23.30153609, 23.22575537],
[23.18046268, 23.21166985, 23.20228394, 23.23549556, 23.24014712,
23.24249518, 23.23661603, 23.21994036, 23.23383006, 23.20938681,
23.23133759, 23.21141884, 23.19947223, 23.20100054, 23.20422339,
23.24942772, 23.26663554, 23.21974131, 23.25057745, 23.25057519,
23.246295 , 23.2453716 , 23.31446075, 23.29671882, 23.30756449,
23.30153609, 23.22575537, 23.22611489],
[23.21166985, 23.20228394, 23.23549556, 23.24014712, 23.24249518,
23.23661603, 23.21994036, 23.23383006, 23.20938681, 23.23133759,
23.21141884, 23.19947223, 23.20100054, 23.20422339, 23.24942772,
23.26663554, 23.21974131, 23.25057745, 23.25057519, 23.246295 ,
23.2453716 , 23.31446075, 23.29671882, 23.30756449, 23.30153609,
23.22575537, 23.22611489, 23.20032337],
[23.20228394, 23.23549556, 23.24014712, 23.24249518, 23.23661603,
23.21994036, 23.23383006, 23.20938681, 23.23133759, 23.21141884,
23.19947223, 23.20100054, 23.20422339, 23.24942772, 23.26663554,
23.21974131, 23.25057745, 23.25057519, 23.246295 , 23.2453716 ,
23.31446075, 23.29671882, 23.30756449, 23.30153609, 23.22575537,
23.22611489, 23.20032337, 23.21863885],
[23.23549556, 23.24014712, 23.24249518, 23.23661603, 23.21994036,
23.23383006, 23.20938681, 23.23133759, 23.21141884, 23.19947223,
23.20100054, 23.20422339, 23.24942772, 23.26663554, 23.21974131,
23.25057745, 23.25057519, 23.246295 , 23.2453716 , 23.31446075,
23.29671882, 23.30756449, 23.30153609, 23.22575537, 23.22611489,
23.20032337, 23.21863885, 23.23357156],
[23.24014712, 23.24249518, 23.23661603, 23.21994036, 23.23383006,
23.20938681, 23.23133759, 23.21141884, 23.19947223, 23.20100054,
23.20422339, 23.24942772, 23.26663554, 23.21974131, 23.25057745,
23.25057519, 23.246295 , 23.2453716 , 23.31446075, 23.29671882,
23.30756449, 23.30153609, 23.22575537, 23.22611489, 23.20032337,
23.21863885, 23.23357156, 23.22483832],
[23.24249518, 23.23661603, 23.21994036, 23.23383006, 23.20938681,
23.23133759, 23.21141884, 23.19947223, 23.20100054, 23.20422339,
23.24942772, 23.26663554, 23.21974131, 23.25057745, 23.25057519,
23.246295 , 23.2453716 , 23.31446075, 23.29671882, 23.30756449,
23.30153609, 23.22575537, 23.22611489, 23.20032337, 23.21863885,
23.23357156, 23.22483832, 23.1903271 ],
[23.23661603, 23.21994036, 23.23383006, 23.20938681, 23.23133759,
23.21141884, 23.19947223, 23.20100054, 23.20422339, 23.24942772,
23.26663554, 23.21974131, 23.25057745, 23.25057519, 23.246295 ,
23.2453716 , 23.31446075, 23.29671882, 23.30756449, 23.30153609,
23.22575537, 23.22611489, 23.20032337, 23.21863885, 23.23357156,
23.22483832, 23.1903271 , 23.19893153],
[23.21994036, 23.23383006, 23.20938681, 23.23133759, 23.21141884,
23.19947223, 23.20100054, 23.20422339, 23.24942772, 23.26663554,
23.21974131, 23.25057745, 23.25057519, 23.246295 , 23.2453716 ,
23.31446075, 23.29671882, 23.30756449, 23.30153609, 23.22575537,
23.22611489, 23.20032337, 23.21863885, 23.23357156, 23.22483832,
23.1903271 , 23.19893153, 23.20006964],
[23.23383006, 23.20938681, 23.23133759, 23.21141884, 23.19947223,
23.20100054, 23.20422339, 23.24942772, 23.26663554, 23.21974131,
23.25057745, 23.25057519, 23.246295 , 23.2453716 , 23.31446075,
23.29671882, 23.30756449, 23.30153609, 23.22575537, 23.22611489,
23.20032337, 23.21863885, 23.23357156, 23.22483832, 23.1903271 ,
23.19893153, 23.20006964, 23.1973787 ],
[23.20938681, 23.23133759, 23.21141884, 23.19947223, 23.20100054,
23.20422339, 23.24942772, 23.26663554, 23.21974131, 23.25057745,
23.25057519, 23.246295 , 23.2453716 , 23.31446075, 23.29671882,
23.30756449, 23.30153609, 23.22575537, 23.22611489, 23.20032337,
23.21863885, 23.23357156, 23.22483832, 23.1903271 , 23.19893153,
23.20006964, 23.1973787 , 23.14863786],
[23.23133759, 23.21141884, 23.19947223, 23.20100054, 23.20422339,
23.24942772, 23.26663554, 23.21974131, 23.25057745, 23.25057519,
23.246295 , 23.2453716 , 23.31446075, 23.29671882, 23.30756449,
23.30153609, 23.22575537, 23.22611489, 23.20032337, 23.21863885,
23.23357156, 23.22483832, 23.1903271 , 23.19893153, 23.20006964,
23.1973787 , 23.14863786, 23.15704105],
[23.21141884, 23.19947223, 23.20100054, 23.20422339, 23.24942772,
23.26663554, 23.21974131, 23.25057745, 23.25057519, 23.246295 ,
23.2453716 , 23.31446075, 23.29671882, 23.30756449, 23.30153609,
23.22575537, 23.22611489, 23.20032337, 23.21863885, 23.23357156,
23.22483832, 23.1903271 , 23.19893153, 23.20006964, 23.1973787 ,
23.14863786, 23.15704105, 23.16198479],
[23.19947223, 23.20100054, 23.20422339, 23.24942772, 23.26663554,
23.21974131, 23.25057745, 23.25057519, 23.246295 , 23.2453716 ,
23.31446075, 23.29671882, 23.30756449, 23.30153609, 23.22575537,
23.22611489, 23.20032337, 23.21863885, 23.23357156, 23.22483832,
23.1903271 , 23.19893153, 23.20006964, 23.1973787 , 23.14863786,
23.15704105, 23.16198479, 23.15823535],
[23.20100054, 23.20422339, 23.24942772, 23.26663554, 23.21974131,
23.25057745, 23.25057519, 23.246295 , 23.2453716 , 23.31446075,
23.29671882, 23.30756449, 23.30153609, 23.22575537, 23.22611489,
23.20032337, 23.21863885, 23.23357156, 23.22483832, 23.1903271 ,
23.19893153, 23.20006964, 23.1973787 , 23.14863786, 23.15704105,
23.16198479, 23.15823535, 23.20520085],
[23.20422339, 23.24942772, 23.26663554, 23.21974131, 23.25057745,
23.25057519, 23.246295 , 23.2453716 , 23.31446075, 23.29671882,
23.30756449, 23.30153609, 23.22575537, 23.22611489, 23.20032337,
23.21863885, 23.23357156, 23.22483832, 23.1903271 , 23.19893153,
23.20006964, 23.1973787 , 23.14863786, 23.15704105, 23.16198479,
23.15823535, 23.20520085, 23.21260555]])
y_val_at28
array([[23.24942772],
[23.26663554],
[23.21974131],
[23.25057745],
[23.25057519],
[23.246295 ],
[23.2453716 ],
[23.31446075],
[23.29671882],
[23.30756449],
[23.30153609],
[23.22575537],
[23.22611489],
[23.20032337],
[23.21863885],
[23.23357156],
[23.22483832],
[23.1903271 ],
[23.19893153],
[23.20006964],
[23.1973787 ],
[23.14863786],
[23.15704105],
[23.16198479],
[23.15823535],
[23.20520085],
[23.21260555],
[23.21046409]])
X_test_at28
array([[23.24942772, 23.26663554, 23.21974131, 23.25057745, 23.25057519,
23.246295 , 23.2453716 , 23.31446075, 23.29671882, 23.30756449,
23.30153609, 23.22575537, 23.22611489, 23.20032337, 23.21863885,
23.23357156, 23.22483832, 23.1903271 , 23.19893153, 23.20006964,
23.1973787 , 23.14863786, 23.15704105, 23.16198479, 23.15823535,
23.20520085, 23.21260555, 23.21046409],
[23.26663554, 23.21974131, 23.25057745, 23.25057519, 23.246295 ,
23.2453716 , 23.31446075, 23.29671882, 23.30756449, 23.30153609,
23.22575537, 23.22611489, 23.20032337, 23.21863885, 23.23357156,
23.22483832, 23.1903271 , 23.19893153, 23.20006964, 23.1973787 ,
23.14863786, 23.15704105, 23.16198479, 23.15823535, 23.20520085,
23.21260555, 23.21046409, 23.24065869],
[23.21974131, 23.25057745, 23.25057519, 23.246295 , 23.2453716 ,
23.31446075, 23.29671882, 23.30756449, 23.30153609, 23.22575537,
23.22611489, 23.20032337, 23.21863885, 23.23357156, 23.22483832,
23.1903271 , 23.19893153, 23.20006964, 23.1973787 , 23.14863786,
23.15704105, 23.16198479, 23.15823535, 23.20520085, 23.21260555,
23.21046409, 23.24065869, 23.23249237],
[23.25057745, 23.25057519, 23.246295 , 23.2453716 , 23.31446075,
23.29671882, 23.30756449, 23.30153609, 23.22575537, 23.22611489,
23.20032337, 23.21863885, 23.23357156, 23.22483832, 23.1903271 ,
23.19893153, 23.20006964, 23.1973787 , 23.14863786, 23.15704105,
23.16198479, 23.15823535, 23.20520085, 23.21260555, 23.21046409,
23.24065869, 23.23249237, 23.22397707],
[23.25057519, 23.246295 , 23.2453716 , 23.31446075, 23.29671882,
23.30756449, 23.30153609, 23.22575537, 23.22611489, 23.20032337,
23.21863885, 23.23357156, 23.22483832, 23.1903271 , 23.19893153,
23.20006964, 23.1973787 , 23.14863786, 23.15704105, 23.16198479,
23.15823535, 23.20520085, 23.21260555, 23.21046409, 23.24065869,
23.23249237, 23.22397707, 23.23574067],
[23.246295 , 23.2453716 , 23.31446075, 23.29671882, 23.30756449,
23.30153609, 23.22575537, 23.22611489, 23.20032337, 23.21863885,
23.23357156, 23.22483832, 23.1903271 , 23.19893153, 23.20006964,
23.1973787 , 23.14863786, 23.15704105, 23.16198479, 23.15823535,
23.20520085, 23.21260555, 23.21046409, 23.24065869, 23.23249237,
23.22397707, 23.23574067, 23.23837057],
[23.2453716 , 23.31446075, 23.29671882, 23.30756449, 23.30153609,
23.22575537, 23.22611489, 23.20032337, 23.21863885, 23.23357156,
23.22483832, 23.1903271 , 23.19893153, 23.20006964, 23.1973787 ,
23.14863786, 23.15704105, 23.16198479, 23.15823535, 23.20520085,
23.21260555, 23.21046409, 23.24065869, 23.23249237, 23.22397707,
23.23574067, 23.23837057, 23.23399503],
[23.31446075, 23.29671882, 23.30756449, 23.30153609, 23.22575537,
23.22611489, 23.20032337, 23.21863885, 23.23357156, 23.22483832,
23.1903271 , 23.19893153, 23.20006964, 23.1973787 , 23.14863786,
23.15704105, 23.16198479, 23.15823535, 23.20520085, 23.21260555,
23.21046409, 23.24065869, 23.23249237, 23.22397707, 23.23574067,
23.23837057, 23.23399503, 23.2241289 ],
[23.29671882, 23.30756449, 23.30153609, 23.22575537, 23.22611489,
23.20032337, 23.21863885, 23.23357156, 23.22483832, 23.1903271 ,
23.19893153, 23.20006964, 23.1973787 , 23.14863786, 23.15704105,
23.16198479, 23.15823535, 23.20520085, 23.21260555, 23.21046409,
23.24065869, 23.23249237, 23.22397707, 23.23574067, 23.23837057,
23.23399503, 23.2241289 , 23.2268484 ],
[23.30756449, 23.30153609, 23.22575537, 23.22611489, 23.20032337,
23.21863885, 23.23357156, 23.22483832, 23.1903271 , 23.19893153,
23.20006964, 23.1973787 , 23.14863786, 23.15704105, 23.16198479,
23.15823535, 23.20520085, 23.21260555, 23.21046409, 23.24065869,
23.23249237, 23.22397707, 23.23574067, 23.23837057, 23.23399503,
23.2241289 , 23.2268484 , 23.2359942 ],
[23.30153609, 23.22575537, 23.22611489, 23.20032337, 23.21863885,
23.23357156, 23.22483832, 23.1903271 , 23.19893153, 23.20006964,
23.1973787 , 23.14863786, 23.15704105, 23.16198479, 23.15823535,
23.20520085, 23.21260555, 23.21046409, 23.24065869, 23.23249237,
23.22397707, 23.23574067, 23.23837057, 23.23399503, 23.2241289 ,
23.2268484 , 23.2359942 , 23.26505353],
[23.22575537, 23.22611489, 23.20032337, 23.21863885, 23.23357156,
23.22483832, 23.1903271 , 23.19893153, 23.20006964, 23.1973787 ,
23.14863786, 23.15704105, 23.16198479, 23.15823535, 23.20520085,
23.21260555, 23.21046409, 23.24065869, 23.23249237, 23.22397707,
23.23574067, 23.23837057, 23.23399503, 23.2241289 , 23.2268484 ,
23.2359942 , 23.26505353, 23.28655792],
[23.22611489, 23.20032337, 23.21863885, 23.23357156, 23.22483832,
23.1903271 , 23.19893153, 23.20006964, 23.1973787 , 23.14863786,
23.15704105, 23.16198479, 23.15823535, 23.20520085, 23.21260555,
23.21046409, 23.24065869, 23.23249237, 23.22397707, 23.23574067,
23.23837057, 23.23399503, 23.2241289 , 23.2268484 , 23.2359942 ,
23.26505353, 23.28655792, 23.32705397],
[23.20032337, 23.21863885, 23.23357156, 23.22483832, 23.1903271 ,
23.19893153, 23.20006964, 23.1973787 , 23.14863786, 23.15704105,
23.16198479, 23.15823535, 23.20520085, 23.21260555, 23.21046409,
23.24065869, 23.23249237, 23.22397707, 23.23574067, 23.23837057,
23.23399503, 23.2241289 , 23.2268484 , 23.2359942 , 23.26505353,
23.28655792, 23.32705397, 23.34043682],
[23.21863885, 23.23357156, 23.22483832, 23.1903271 , 23.19893153,
23.20006964, 23.1973787 , 23.14863786, 23.15704105, 23.16198479,
23.15823535, 23.20520085, 23.21260555, 23.21046409, 23.24065869,
23.23249237, 23.22397707, 23.23574067, 23.23837057, 23.23399503,
23.2241289 , 23.2268484 , 23.2359942 , 23.26505353, 23.28655792,
23.32705397, 23.34043682, 23.35130615],
[23.23357156, 23.22483832, 23.1903271 , 23.19893153, 23.20006964,
23.1973787 , 23.14863786, 23.15704105, 23.16198479, 23.15823535,
23.20520085, 23.21260555, 23.21046409, 23.24065869, 23.23249237,
23.22397707, 23.23574067, 23.23837057, 23.23399503, 23.2241289 ,
23.2268484 , 23.2359942 , 23.26505353, 23.28655792, 23.32705397,
23.34043682, 23.35130615, 23.3857737 ],
[23.22483832, 23.1903271 , 23.19893153, 23.20006964, 23.1973787 ,
23.14863786, 23.15704105, 23.16198479, 23.15823535, 23.20520085,
23.21260555, 23.21046409, 23.24065869, 23.23249237, 23.22397707,
23.23574067, 23.23837057, 23.23399503, 23.2241289 , 23.2268484 ,
23.2359942 , 23.26505353, 23.28655792, 23.32705397, 23.34043682,
23.35130615, 23.3857737 , 23.3812624 ],
[23.1903271 , 23.19893153, 23.20006964, 23.1973787 , 23.14863786,
23.15704105, 23.16198479, 23.15823535, 23.20520085, 23.21260555,
23.21046409, 23.24065869, 23.23249237, 23.22397707, 23.23574067,
23.23837057, 23.23399503, 23.2241289 , 23.2268484 , 23.2359942 ,
23.26505353, 23.28655792, 23.32705397, 23.34043682, 23.35130615,
23.3857737 , 23.3812624 , 23.42282649],
[23.19893153, 23.20006964, 23.1973787 , 23.14863786, 23.15704105,
23.16198479, 23.15823535, 23.20520085, 23.21260555, 23.21046409,
23.24065869, 23.23249237, 23.22397707, 23.23574067, 23.23837057,
23.23399503, 23.2241289 , 23.2268484 , 23.2359942 , 23.26505353,
23.28655792, 23.32705397, 23.34043682, 23.35130615, 23.3857737 ,
23.3812624 , 23.42282649, 23.42512263],
[23.20006964, 23.1973787 , 23.14863786, 23.15704105, 23.16198479,
23.15823535, 23.20520085, 23.21260555, 23.21046409, 23.24065869,
23.23249237, 23.22397707, 23.23574067, 23.23837057, 23.23399503,
23.2241289 , 23.2268484 , 23.2359942 , 23.26505353, 23.28655792,
23.32705397, 23.34043682, 23.35130615, 23.3857737 , 23.3812624 ,
23.42282649, 23.42512263, 23.42961 ],
[23.1973787 , 23.14863786, 23.15704105, 23.16198479, 23.15823535,
23.20520085, 23.21260555, 23.21046409, 23.24065869, 23.23249237,
23.22397707, 23.23574067, 23.23837057, 23.23399503, 23.2241289 ,
23.2268484 , 23.2359942 , 23.26505353, 23.28655792, 23.32705397,
23.34043682, 23.35130615, 23.3857737 , 23.3812624 , 23.42282649,
23.42512263, 23.42961 , 23.4202457 ],
[23.14863786, 23.15704105, 23.16198479, 23.15823535, 23.20520085,
23.21260555, 23.21046409, 23.24065869, 23.23249237, 23.22397707,
23.23574067, 23.23837057, 23.23399503, 23.2241289 , 23.2268484 ,
23.2359942 , 23.26505353, 23.28655792, 23.32705397, 23.34043682,
23.35130615, 23.3857737 , 23.3812624 , 23.42282649, 23.42512263,
23.42961 , 23.4202457 , 23.42937516],
[23.15704105, 23.16198479, 23.15823535, 23.20520085, 23.21260555,
23.21046409, 23.24065869, 23.23249237, 23.22397707, 23.23574067,
23.23837057, 23.23399503, 23.2241289 , 23.2268484 , 23.2359942 ,
23.26505353, 23.28655792, 23.32705397, 23.34043682, 23.35130615,
23.3857737 , 23.3812624 , 23.42282649, 23.42512263, 23.42961 ,
23.4202457 , 23.42937516, 23.42296848],
[23.16198479, 23.15823535, 23.20520085, 23.21260555, 23.21046409,
23.24065869, 23.23249237, 23.22397707, 23.23574067, 23.23837057,
23.23399503, 23.2241289 , 23.2268484 , 23.2359942 , 23.26505353,
23.28655792, 23.32705397, 23.34043682, 23.35130615, 23.3857737 ,
23.3812624 , 23.42282649, 23.42512263, 23.42961 , 23.4202457 ,
23.42937516, 23.42296848, 23.4322359 ],
[23.15823535, 23.20520085, 23.21260555, 23.21046409, 23.24065869,
23.23249237, 23.22397707, 23.23574067, 23.23837057, 23.23399503,
23.2241289 , 23.2268484 , 23.2359942 , 23.26505353, 23.28655792,
23.32705397, 23.34043682, 23.35130615, 23.3857737 , 23.3812624 ,
23.42282649, 23.42512263, 23.42961 , 23.4202457 , 23.42937516,
23.42296848, 23.4322359 , 23.42448096],
[23.20520085, 23.21260555, 23.21046409, 23.24065869, 23.23249237,
23.22397707, 23.23574067, 23.23837057, 23.23399503, 23.2241289 ,
23.2268484 , 23.2359942 , 23.26505353, 23.28655792, 23.32705397,
23.34043682, 23.35130615, 23.3857737 , 23.3812624 , 23.42282649,
23.42512263, 23.42961 , 23.4202457 , 23.42937516, 23.42296848,
23.4322359 , 23.42448096, 23.41411037],
[23.21260555, 23.21046409, 23.24065869, 23.23249237, 23.22397707,
23.23574067, 23.23837057, 23.23399503, 23.2241289 , 23.2268484 ,
23.2359942 , 23.26505353, 23.28655792, 23.32705397, 23.34043682,
23.35130615, 23.3857737 , 23.3812624 , 23.42282649, 23.42512263,
23.42961 , 23.4202457 , 23.42937516, 23.42296848, 23.4322359 ,
23.42448096, 23.41411037, 23.40281077],
[23.21046409, 23.24065869, 23.23249237, 23.22397707, 23.23574067,
23.23837057, 23.23399503, 23.2241289 , 23.2268484 , 23.2359942 ,
23.26505353, 23.28655792, 23.32705397, 23.34043682, 23.35130615,
23.3857737 , 23.3812624 , 23.42282649, 23.42512263, 23.42961 ,
23.4202457 , 23.42937516, 23.42296848, 23.4322359 , 23.42448096,
23.41411037, 23.40281077, 23.41919019]])
y_test_at28
array([[23.24065869],
[23.23249237],
[23.22397707],
[23.23574067],
[23.23837057],
[23.23399503],
[23.2241289 ],
[23.2268484 ],
[23.2359942 ],
[23.26505353],
[23.28655792],
[23.32705397],
[23.34043682],
[23.35130615],
[23.3857737 ],
[23.3812624 ],
[23.42282649],
[23.42512263],
[23.42961 ],
[23.4202457 ],
[23.42937516],
[23.42296848],
[23.4322359 ],
[23.42448096],
[23.41411037],
[23.40281077],
[23.41919019],
[23.4221201 ]])
Volatilidad#
\(\omega = 7\)
Horizonte de 7 días (\(\tau=7\))
# Build the sliding-window datasets for 7-day volatility with horizon tau7,
# then report the shape of every split.
vola7_splits = create_time_series_datasets(df_1_st, 'Volatilidad_7', tau7)
X_train_vola7, y_train_vola7, X_val_vola7, y_val_vola7, X_test_vola7, y_test_vola7 = vola7_splits
# Print the dimensions of each array (same output as printing one by one).
for split_name, split_arr in zip(
    ("X_train", "y_train", "X_val", "y_val", "X_test", "y_test"), vola7_splits
):
    print(f"{split_name} shape:", split_arr.shape)
X_train shape: (4971, 7)
y_train shape: (4971, 1)
X_val shape: (7, 7)
y_val shape: (7, 1)
X_test shape: (7, 7)
y_test shape: (7, 1)
A continuación se puede visualizar el arreglo para cada una de las matrices generadas para los datos de entrenamiento, validación y prueba para un \(\tau = 7\).
X_train_vola7
array([[0. , 0. , 0. , ..., 0. , 0. ,
0. ],
[0. , 0. , 0. , ..., 0. , 0. ,
0. ],
[0. , 0. , 0. , ..., 0. , 0. ,
0. ],
...,
[0.00904375, 0.01135615, 0.01073662, ..., 0.0273388 , 0.03929255,
0.04112336],
[0.01135615, 0.01073662, 0.02285338, ..., 0.03929255, 0.04112336,
0.03795584],
[0.01073662, 0.02285338, 0.0273388 , ..., 0.04112336, 0.03795584,
0.04029001]])
y_train_vola7
array([[0. ],
[0. ],
[0. ],
...,
[0.03795584],
[0.04029001],
[0.03913681]])
X_val_vola7
array([[0.02285338, 0.0273388 , 0.03929255, 0.04112336, 0.03795584,
0.04029001, 0.03913681],
[0.0273388 , 0.03929255, 0.04112336, 0.03795584, 0.04029001,
0.03913681, 0.04317306],
[0.03929255, 0.04112336, 0.03795584, 0.04029001, 0.03913681,
0.04317306, 0.05628134],
[0.04112336, 0.03795584, 0.04029001, 0.03913681, 0.04317306,
0.05628134, 0.04628254],
[0.03795584, 0.04029001, 0.03913681, 0.04317306, 0.05628134,
0.04628254, 0.0443908 ],
[0.04029001, 0.03913681, 0.04317306, 0.05628134, 0.04628254,
0.0443908 , 0.04438165],
[0.03913681, 0.04317306, 0.05628134, 0.04628254, 0.0443908 ,
0.04438165, 0.04381709]])
y_val_vola7
array([[0.04317306],
[0.05628134],
[0.04628254],
[0.0443908 ],
[0.04438165],
[0.04381709],
[0.04384013]])
X_test_vola7
array([[0.04317306, 0.05628134, 0.04628254, 0.0443908 , 0.04438165,
0.04381709, 0.04384013],
[0.05628134, 0.04628254, 0.0443908 , 0.04438165, 0.04381709,
0.04384013, 0.0359863 ],
[0.04628254, 0.0443908 , 0.04438165, 0.04381709, 0.04384013,
0.0359863 , 0.01883419],
[0.0443908 , 0.04438165, 0.04381709, 0.04384013, 0.0359863 ,
0.01883419, 0.01713542],
[0.04438165, 0.04381709, 0.04384013, 0.0359863 , 0.01883419,
0.01713542, 0.02233183],
[0.04381709, 0.04384013, 0.0359863 , 0.01883419, 0.01713542,
0.02233183, 0.02555311],
[0.04384013, 0.0359863 , 0.01883419, 0.01713542, 0.02233183,
0.02555311, 0.03485504]])
y_test_vola7
array([[0.0359863 ],
[0.01883419],
[0.01713542],
[0.02233183],
[0.02555311],
[0.03485504],
[0.0401675 ]])
Horizonte de 14 días (\(\tau=14\))
# Build the sliding-window datasets for 7-day volatility with horizon tau14,
# then report the shape of every split.
vola14_splits = create_time_series_datasets(df_1_st, 'Volatilidad_7', tau14)
X_train_vola14, y_train_vola14, X_val_vola14, y_val_vola14, X_test_vola14, y_test_vola14 = vola14_splits
# Print the dimensions of each array (same output as printing one by one).
for split_name, split_arr in zip(
    ("X_train", "y_train", "X_val", "y_val", "X_test", "y_test"), vola14_splits
):
    print(f"{split_name} shape:", split_arr.shape)
X_train shape: (4943, 14)
y_train shape: (4943, 1)
X_val shape: (14, 14)
y_val shape: (14, 1)
X_test shape: (14, 14)
y_test shape: (14, 1)
A continuación se puede visualizar el arreglo para cada una de las matrices generadas para los datos de entrenamiento, validación y prueba para un \(\tau = 14\).
X_train_vola14
array([[0. , 0. , 0. , ..., 0. , 0. ,
0. ],
[0. , 0. , 0. , ..., 0. , 0. ,
0. ],
[0. , 0. , 0. , ..., 0. , 0. ,
0. ],
...,
[0.02796862, 0.02807775, 0.0280639 , ..., 0.00840223, 0.01262426,
0.0138589 ],
[0.02807775, 0.0280639 , 0.01860246, ..., 0.01262426, 0.0138589 ,
0.01839749],
[0.0280639 , 0.01860246, 0.02028465, ..., 0.0138589 , 0.01839749,
0.01681143]])
y_train_vola14
array([[0. ],
[0. ],
[0. ],
...,
[0.01839749],
[0.01681143],
[0.01306726]])
X_val_vola14
array([[0.01860246, 0.02028465, 0.02152959, 0.0208019 , 0.01353053,
0.01387194, 0.01464025, 0.00797008, 0.00840223, 0.01262426,
0.0138589 , 0.01839749, 0.01681143, 0.01306726],
[0.02028465, 0.02152959, 0.0208019 , 0.01353053, 0.01387194,
0.01464025, 0.00797008, 0.00840223, 0.01262426, 0.0138589 ,
0.01839749, 0.01681143, 0.01306726, 0.01229862],
[0.02152959, 0.0208019 , 0.01353053, 0.01387194, 0.01464025,
0.00797008, 0.00840223, 0.01262426, 0.0138589 , 0.01839749,
0.01681143, 0.01306726, 0.01229862, 0.0154797 ],
[0.0208019 , 0.01353053, 0.01387194, 0.01464025, 0.00797008,
0.00840223, 0.01262426, 0.0138589 , 0.01839749, 0.01681143,
0.01306726, 0.01229862, 0.0154797 , 0.01722162],
[0.01353053, 0.01387194, 0.01464025, 0.00797008, 0.00840223,
0.01262426, 0.0138589 , 0.01839749, 0.01681143, 0.01306726,
0.01229862, 0.0154797 , 0.01722162, 0.01886575],
[0.01387194, 0.01464025, 0.00797008, 0.00840223, 0.01262426,
0.0138589 , 0.01839749, 0.01681143, 0.01306726, 0.01229862,
0.0154797 , 0.01722162, 0.01886575, 0.01710777],
[0.01464025, 0.00797008, 0.00840223, 0.01262426, 0.0138589 ,
0.01839749, 0.01681143, 0.01306726, 0.01229862, 0.0154797 ,
0.01722162, 0.01886575, 0.01710777, 0.01939611],
[0.00797008, 0.00840223, 0.01262426, 0.0138589 , 0.01839749,
0.01681143, 0.01306726, 0.01229862, 0.0154797 , 0.01722162,
0.01886575, 0.01710777, 0.01939611, 0.0194152 ],
[0.00840223, 0.01262426, 0.0138589 , 0.01839749, 0.01681143,
0.01306726, 0.01229862, 0.0154797 , 0.01722162, 0.01886575,
0.01710777, 0.01939611, 0.0194152 , 0.01726162],
[0.01262426, 0.0138589 , 0.01839749, 0.01681143, 0.01306726,
0.01229862, 0.0154797 , 0.01722162, 0.01886575, 0.01710777,
0.01939611, 0.0194152 , 0.01726162, 0.01673203],
[0.0138589 , 0.01839749, 0.01681143, 0.01306726, 0.01229862,
0.0154797 , 0.01722162, 0.01886575, 0.01710777, 0.01939611,
0.0194152 , 0.01726162, 0.01673203, 0.00799087],
[0.01839749, 0.01681143, 0.01306726, 0.01229862, 0.0154797 ,
0.01722162, 0.01886575, 0.01710777, 0.01939611, 0.0194152 ,
0.01726162, 0.01673203, 0.00799087, 0.00883617],
[0.01681143, 0.01306726, 0.01229862, 0.0154797 , 0.01722162,
0.01886575, 0.01710777, 0.01939611, 0.0194152 , 0.01726162,
0.01673203, 0.00799087, 0.00883617, 0.00904375],
[0.01306726, 0.01229862, 0.0154797 , 0.01722162, 0.01886575,
0.01710777, 0.01939611, 0.0194152 , 0.01726162, 0.01673203,
0.00799087, 0.00883617, 0.00904375, 0.01135615]])
y_val_vola14
array([[0.01229862],
[0.0154797 ],
[0.01722162],
[0.01886575],
[0.01710777],
[0.01939611],
[0.0194152 ],
[0.01726162],
[0.01673203],
[0.00799087],
[0.00883617],
[0.00904375],
[0.01135615],
[0.01073662]])
X_test_vola14
array([[0.01229862, 0.0154797 , 0.01722162, 0.01886575, 0.01710777,
0.01939611, 0.0194152 , 0.01726162, 0.01673203, 0.00799087,
0.00883617, 0.00904375, 0.01135615, 0.01073662],
[0.0154797 , 0.01722162, 0.01886575, 0.01710777, 0.01939611,
0.0194152 , 0.01726162, 0.01673203, 0.00799087, 0.00883617,
0.00904375, 0.01135615, 0.01073662, 0.02285338],
[0.01722162, 0.01886575, 0.01710777, 0.01939611, 0.0194152 ,
0.01726162, 0.01673203, 0.00799087, 0.00883617, 0.00904375,
0.01135615, 0.01073662, 0.02285338, 0.0273388 ],
[0.01886575, 0.01710777, 0.01939611, 0.0194152 , 0.01726162,
0.01673203, 0.00799087, 0.00883617, 0.00904375, 0.01135615,
0.01073662, 0.02285338, 0.0273388 , 0.03929255],
[0.01710777, 0.01939611, 0.0194152 , 0.01726162, 0.01673203,
0.00799087, 0.00883617, 0.00904375, 0.01135615, 0.01073662,
0.02285338, 0.0273388 , 0.03929255, 0.04112336],
[0.01939611, 0.0194152 , 0.01726162, 0.01673203, 0.00799087,
0.00883617, 0.00904375, 0.01135615, 0.01073662, 0.02285338,
0.0273388 , 0.03929255, 0.04112336, 0.03795584],
[0.0194152 , 0.01726162, 0.01673203, 0.00799087, 0.00883617,
0.00904375, 0.01135615, 0.01073662, 0.02285338, 0.0273388 ,
0.03929255, 0.04112336, 0.03795584, 0.04029001],
[0.01726162, 0.01673203, 0.00799087, 0.00883617, 0.00904375,
0.01135615, 0.01073662, 0.02285338, 0.0273388 , 0.03929255,
0.04112336, 0.03795584, 0.04029001, 0.03913681],
[0.01673203, 0.00799087, 0.00883617, 0.00904375, 0.01135615,
0.01073662, 0.02285338, 0.0273388 , 0.03929255, 0.04112336,
0.03795584, 0.04029001, 0.03913681, 0.04317306],
[0.00799087, 0.00883617, 0.00904375, 0.01135615, 0.01073662,
0.02285338, 0.0273388 , 0.03929255, 0.04112336, 0.03795584,
0.04029001, 0.03913681, 0.04317306, 0.05628134],
[0.00883617, 0.00904375, 0.01135615, 0.01073662, 0.02285338,
0.0273388 , 0.03929255, 0.04112336, 0.03795584, 0.04029001,
0.03913681, 0.04317306, 0.05628134, 0.04628254],
[0.00904375, 0.01135615, 0.01073662, 0.02285338, 0.0273388 ,
0.03929255, 0.04112336, 0.03795584, 0.04029001, 0.03913681,
0.04317306, 0.05628134, 0.04628254, 0.0443908 ],
[0.01135615, 0.01073662, 0.02285338, 0.0273388 , 0.03929255,
0.04112336, 0.03795584, 0.04029001, 0.03913681, 0.04317306,
0.05628134, 0.04628254, 0.0443908 , 0.04438165],
[0.01073662, 0.02285338, 0.0273388 , 0.03929255, 0.04112336,
0.03795584, 0.04029001, 0.03913681, 0.04317306, 0.05628134,
0.04628254, 0.0443908 , 0.04438165, 0.04381709]])
y_test_vola14
array([[0.02285338],
[0.0273388 ],
[0.03929255],
[0.04112336],
[0.03795584],
[0.04029001],
[0.03913681],
[0.04317306],
[0.05628134],
[0.04628254],
[0.0443908 ],
[0.04438165],
[0.04381709],
[0.04384013]])
Horizonte de 21 días (\(\tau=21\))
# Build the sliding-window datasets for 7-day volatility with horizon tau21,
# then report the shape of every split.
vola21_splits = create_time_series_datasets(df_1_st, 'Volatilidad_7', tau21)
X_train_vola21, y_train_vola21, X_val_vola21, y_val_vola21, X_test_vola21, y_test_vola21 = vola21_splits
# Print the dimensions of each array (same output as printing one by one).
for split_name, split_arr in zip(
    ("X_train", "y_train", "X_val", "y_val", "X_test", "y_test"), vola21_splits
):
    print(f"{split_name} shape:", split_arr.shape)
X_train shape: (4915, 21)
y_train shape: (4915, 1)
X_val shape: (21, 21)
y_val shape: (21, 1)
X_test shape: (21, 21)
y_test shape: (21, 1)
A continuación se puede visualizar el arreglo para cada una de las matrices generadas para los datos de entrenamiento, validación y prueba para un \(\tau = 21\).
X_train_vola21
array([[0. , 0. , 0. , ..., 0. , 0. ,
0. ],
[0. , 0. , 0. , ..., 0. , 0. ,
0. ],
[0. , 0. , 0. , ..., 0. , 0. ,
0. ],
...,
[0.01760648, 0.01784617, 0.0172682 , ..., 0.03304242, 0.03188898,
0.03283039],
[0.01784617, 0.0172682 , 0.02463123, ..., 0.03188898, 0.03283039,
0.02025799],
[0.0172682 , 0.02463123, 0.02200521, ..., 0.03283039, 0.02025799,
0.02028695]])
y_train_vola21
array([[0. ],
[0. ],
[0. ],
...,
[0.02025799],
[0.02028695],
[0.01782821]])
X_val_vola21
array([[0.02463123, 0.02200521, 0.02907347, 0.03007176, 0.02923789,
0.02951017, 0.0296276 , 0.03567059, 0.03680559, 0.0290801 ,
0.02854897, 0.04265354, 0.04267626, 0.04351283, 0.03135964,
0.03304242, 0.03188898, 0.03283039, 0.02025799, 0.02028695,
0.01782821],
[0.02200521, 0.02907347, 0.03007176, 0.02923789, 0.02951017,
0.0296276 , 0.03567059, 0.03680559, 0.0290801 , 0.02854897,
0.04265354, 0.04267626, 0.04351283, 0.03135964, 0.03304242,
0.03188898, 0.03283039, 0.02025799, 0.02028695, 0.01782821,
0.02326837],
[0.02907347, 0.03007176, 0.02923789, 0.02951017, 0.0296276 ,
0.03567059, 0.03680559, 0.0290801 , 0.02854897, 0.04265354,
0.04267626, 0.04351283, 0.03135964, 0.03304242, 0.03188898,
0.03283039, 0.02025799, 0.02028695, 0.01782821, 0.02326837,
0.02220903],
[0.03007176, 0.02923789, 0.02951017, 0.0296276 , 0.03567059,
0.03680559, 0.0290801 , 0.02854897, 0.04265354, 0.04267626,
0.04351283, 0.03135964, 0.03304242, 0.03188898, 0.03283039,
0.02025799, 0.02028695, 0.01782821, 0.02326837, 0.02220903,
0.02302156],
[0.02923789, 0.02951017, 0.0296276 , 0.03567059, 0.03680559,
0.0290801 , 0.02854897, 0.04265354, 0.04267626, 0.04351283,
0.03135964, 0.03304242, 0.03188898, 0.03283039, 0.02025799,
0.02028695, 0.01782821, 0.02326837, 0.02220903, 0.02302156,
0.02008411],
[0.02951017, 0.0296276 , 0.03567059, 0.03680559, 0.0290801 ,
0.02854897, 0.04265354, 0.04267626, 0.04351283, 0.03135964,
0.03304242, 0.03188898, 0.03283039, 0.02025799, 0.02028695,
0.01782821, 0.02326837, 0.02220903, 0.02302156, 0.02008411,
0.02796862],
[0.0296276 , 0.03567059, 0.03680559, 0.0290801 , 0.02854897,
0.04265354, 0.04267626, 0.04351283, 0.03135964, 0.03304242,
0.03188898, 0.03283039, 0.02025799, 0.02028695, 0.01782821,
0.02326837, 0.02220903, 0.02302156, 0.02008411, 0.02796862,
0.02807775],
[0.03567059, 0.03680559, 0.0290801 , 0.02854897, 0.04265354,
0.04267626, 0.04351283, 0.03135964, 0.03304242, 0.03188898,
0.03283039, 0.02025799, 0.02028695, 0.01782821, 0.02326837,
0.02220903, 0.02302156, 0.02008411, 0.02796862, 0.02807775,
0.0280639 ],
[0.03680559, 0.0290801 , 0.02854897, 0.04265354, 0.04267626,
0.04351283, 0.03135964, 0.03304242, 0.03188898, 0.03283039,
0.02025799, 0.02028695, 0.01782821, 0.02326837, 0.02220903,
0.02302156, 0.02008411, 0.02796862, 0.02807775, 0.0280639 ,
0.01860246],
[0.0290801 , 0.02854897, 0.04265354, 0.04267626, 0.04351283,
0.03135964, 0.03304242, 0.03188898, 0.03283039, 0.02025799,
0.02028695, 0.01782821, 0.02326837, 0.02220903, 0.02302156,
0.02008411, 0.02796862, 0.02807775, 0.0280639 , 0.01860246,
0.02028465],
[0.02854897, 0.04265354, 0.04267626, 0.04351283, 0.03135964,
0.03304242, 0.03188898, 0.03283039, 0.02025799, 0.02028695,
0.01782821, 0.02326837, 0.02220903, 0.02302156, 0.02008411,
0.02796862, 0.02807775, 0.0280639 , 0.01860246, 0.02028465,
0.02152959],
[0.04265354, 0.04267626, 0.04351283, 0.03135964, 0.03304242,
0.03188898, 0.03283039, 0.02025799, 0.02028695, 0.01782821,
0.02326837, 0.02220903, 0.02302156, 0.02008411, 0.02796862,
0.02807775, 0.0280639 , 0.01860246, 0.02028465, 0.02152959,
0.0208019 ],
[0.04267626, 0.04351283, 0.03135964, 0.03304242, 0.03188898,
0.03283039, 0.02025799, 0.02028695, 0.01782821, 0.02326837,
0.02220903, 0.02302156, 0.02008411, 0.02796862, 0.02807775,
0.0280639 , 0.01860246, 0.02028465, 0.02152959, 0.0208019 ,
0.01353053],
[0.04351283, 0.03135964, 0.03304242, 0.03188898, 0.03283039,
0.02025799, 0.02028695, 0.01782821, 0.02326837, 0.02220903,
0.02302156, 0.02008411, 0.02796862, 0.02807775, 0.0280639 ,
0.01860246, 0.02028465, 0.02152959, 0.0208019 , 0.01353053,
0.01387194],
[0.03135964, 0.03304242, 0.03188898, 0.03283039, 0.02025799,
0.02028695, 0.01782821, 0.02326837, 0.02220903, 0.02302156,
0.02008411, 0.02796862, 0.02807775, 0.0280639 , 0.01860246,
0.02028465, 0.02152959, 0.0208019 , 0.01353053, 0.01387194,
0.01464025],
[0.03304242, 0.03188898, 0.03283039, 0.02025799, 0.02028695,
0.01782821, 0.02326837, 0.02220903, 0.02302156, 0.02008411,
0.02796862, 0.02807775, 0.0280639 , 0.01860246, 0.02028465,
0.02152959, 0.0208019 , 0.01353053, 0.01387194, 0.01464025,
0.00797008],
[0.03188898, 0.03283039, 0.02025799, 0.02028695, 0.01782821,
0.02326837, 0.02220903, 0.02302156, 0.02008411, 0.02796862,
0.02807775, 0.0280639 , 0.01860246, 0.02028465, 0.02152959,
0.0208019 , 0.01353053, 0.01387194, 0.01464025, 0.00797008,
0.00840223],
[0.03283039, 0.02025799, 0.02028695, 0.01782821, 0.02326837,
0.02220903, 0.02302156, 0.02008411, 0.02796862, 0.02807775,
0.0280639 , 0.01860246, 0.02028465, 0.02152959, 0.0208019 ,
0.01353053, 0.01387194, 0.01464025, 0.00797008, 0.00840223,
0.01262426],
[0.02025799, 0.02028695, 0.01782821, 0.02326837, 0.02220903,
0.02302156, 0.02008411, 0.02796862, 0.02807775, 0.0280639 ,
0.01860246, 0.02028465, 0.02152959, 0.0208019 , 0.01353053,
0.01387194, 0.01464025, 0.00797008, 0.00840223, 0.01262426,
0.0138589 ],
[0.02028695, 0.01782821, 0.02326837, 0.02220903, 0.02302156,
0.02008411, 0.02796862, 0.02807775, 0.0280639 , 0.01860246,
0.02028465, 0.02152959, 0.0208019 , 0.01353053, 0.01387194,
0.01464025, 0.00797008, 0.00840223, 0.01262426, 0.0138589 ,
0.01839749],
[0.01782821, 0.02326837, 0.02220903, 0.02302156, 0.02008411,
0.02796862, 0.02807775, 0.0280639 , 0.01860246, 0.02028465,
0.02152959, 0.0208019 , 0.01353053, 0.01387194, 0.01464025,
0.00797008, 0.00840223, 0.01262426, 0.0138589 , 0.01839749,
0.01681143]])
y_val_vola21
array([[0.02326837],
[0.02220903],
[0.02302156],
[0.02008411],
[0.02796862],
[0.02807775],
[0.0280639 ],
[0.01860246],
[0.02028465],
[0.02152959],
[0.0208019 ],
[0.01353053],
[0.01387194],
[0.01464025],
[0.00797008],
[0.00840223],
[0.01262426],
[0.0138589 ],
[0.01839749],
[0.01681143],
[0.01306726]])
y_test_vola21
array([[0.01229862],
[0.0154797 ],
[0.01722162],
[0.01886575],
[0.01710777],
[0.01939611],
[0.0194152 ],
[0.01726162],
[0.01673203],
[0.00799087],
[0.00883617],
[0.00904375],
[0.01135615],
[0.01073662],
[0.02285338],
[0.0273388 ],
[0.03929255],
[0.04112336],
[0.03795584],
[0.04029001],
[0.03913681]])
X_test_vola21
array([[0.02326837, 0.02220903, 0.02302156, 0.02008411, 0.02796862,
0.02807775, 0.0280639 , 0.01860246, 0.02028465, 0.02152959,
0.0208019 , 0.01353053, 0.01387194, 0.01464025, 0.00797008,
0.00840223, 0.01262426, 0.0138589 , 0.01839749, 0.01681143,
0.01306726],
[0.02220903, 0.02302156, 0.02008411, 0.02796862, 0.02807775,
0.0280639 , 0.01860246, 0.02028465, 0.02152959, 0.0208019 ,
0.01353053, 0.01387194, 0.01464025, 0.00797008, 0.00840223,
0.01262426, 0.0138589 , 0.01839749, 0.01681143, 0.01306726,
0.01229862],
[0.02302156, 0.02008411, 0.02796862, 0.02807775, 0.0280639 ,
0.01860246, 0.02028465, 0.02152959, 0.0208019 , 0.01353053,
0.01387194, 0.01464025, 0.00797008, 0.00840223, 0.01262426,
0.0138589 , 0.01839749, 0.01681143, 0.01306726, 0.01229862,
0.0154797 ],
[0.02008411, 0.02796862, 0.02807775, 0.0280639 , 0.01860246,
0.02028465, 0.02152959, 0.0208019 , 0.01353053, 0.01387194,
0.01464025, 0.00797008, 0.00840223, 0.01262426, 0.0138589 ,
0.01839749, 0.01681143, 0.01306726, 0.01229862, 0.0154797 ,
0.01722162],
[0.02796862, 0.02807775, 0.0280639 , 0.01860246, 0.02028465,
0.02152959, 0.0208019 , 0.01353053, 0.01387194, 0.01464025,
0.00797008, 0.00840223, 0.01262426, 0.0138589 , 0.01839749,
0.01681143, 0.01306726, 0.01229862, 0.0154797 , 0.01722162,
0.01886575],
[0.02807775, 0.0280639 , 0.01860246, 0.02028465, 0.02152959,
0.0208019 , 0.01353053, 0.01387194, 0.01464025, 0.00797008,
0.00840223, 0.01262426, 0.0138589 , 0.01839749, 0.01681143,
0.01306726, 0.01229862, 0.0154797 , 0.01722162, 0.01886575,
0.01710777],
[0.0280639 , 0.01860246, 0.02028465, 0.02152959, 0.0208019 ,
0.01353053, 0.01387194, 0.01464025, 0.00797008, 0.00840223,
0.01262426, 0.0138589 , 0.01839749, 0.01681143, 0.01306726,
0.01229862, 0.0154797 , 0.01722162, 0.01886575, 0.01710777,
0.01939611],
[0.01860246, 0.02028465, 0.02152959, 0.0208019 , 0.01353053,
0.01387194, 0.01464025, 0.00797008, 0.00840223, 0.01262426,
0.0138589 , 0.01839749, 0.01681143, 0.01306726, 0.01229862,
0.0154797 , 0.01722162, 0.01886575, 0.01710777, 0.01939611,
0.0194152 ],
[0.02028465, 0.02152959, 0.0208019 , 0.01353053, 0.01387194,
0.01464025, 0.00797008, 0.00840223, 0.01262426, 0.0138589 ,
0.01839749, 0.01681143, 0.01306726, 0.01229862, 0.0154797 ,
0.01722162, 0.01886575, 0.01710777, 0.01939611, 0.0194152 ,
0.01726162],
[0.02152959, 0.0208019 , 0.01353053, 0.01387194, 0.01464025,
0.00797008, 0.00840223, 0.01262426, 0.0138589 , 0.01839749,
0.01681143, 0.01306726, 0.01229862, 0.0154797 , 0.01722162,
0.01886575, 0.01710777, 0.01939611, 0.0194152 , 0.01726162,
0.01673203],
[0.0208019 , 0.01353053, 0.01387194, 0.01464025, 0.00797008,
0.00840223, 0.01262426, 0.0138589 , 0.01839749, 0.01681143,
0.01306726, 0.01229862, 0.0154797 , 0.01722162, 0.01886575,
0.01710777, 0.01939611, 0.0194152 , 0.01726162, 0.01673203,
0.00799087],
[0.01353053, 0.01387194, 0.01464025, 0.00797008, 0.00840223,
0.01262426, 0.0138589 , 0.01839749, 0.01681143, 0.01306726,
0.01229862, 0.0154797 , 0.01722162, 0.01886575, 0.01710777,
0.01939611, 0.0194152 , 0.01726162, 0.01673203, 0.00799087,
0.00883617],
[0.01387194, 0.01464025, 0.00797008, 0.00840223, 0.01262426,
0.0138589 , 0.01839749, 0.01681143, 0.01306726, 0.01229862,
0.0154797 , 0.01722162, 0.01886575, 0.01710777, 0.01939611,
0.0194152 , 0.01726162, 0.01673203, 0.00799087, 0.00883617,
0.00904375],
[0.01464025, 0.00797008, 0.00840223, 0.01262426, 0.0138589 ,
0.01839749, 0.01681143, 0.01306726, 0.01229862, 0.0154797 ,
0.01722162, 0.01886575, 0.01710777, 0.01939611, 0.0194152 ,
0.01726162, 0.01673203, 0.00799087, 0.00883617, 0.00904375,
0.01135615],
[0.00797008, 0.00840223, 0.01262426, 0.0138589 , 0.01839749,
0.01681143, 0.01306726, 0.01229862, 0.0154797 , 0.01722162,
0.01886575, 0.01710777, 0.01939611, 0.0194152 , 0.01726162,
0.01673203, 0.00799087, 0.00883617, 0.00904375, 0.01135615,
0.01073662],
[0.00840223, 0.01262426, 0.0138589 , 0.01839749, 0.01681143,
0.01306726, 0.01229862, 0.0154797 , 0.01722162, 0.01886575,
0.01710777, 0.01939611, 0.0194152 , 0.01726162, 0.01673203,
0.00799087, 0.00883617, 0.00904375, 0.01135615, 0.01073662,
0.02285338],
[0.01262426, 0.0138589 , 0.01839749, 0.01681143, 0.01306726,
0.01229862, 0.0154797 , 0.01722162, 0.01886575, 0.01710777,
0.01939611, 0.0194152 , 0.01726162, 0.01673203, 0.00799087,
0.00883617, 0.00904375, 0.01135615, 0.01073662, 0.02285338,
0.0273388 ],
[0.0138589 , 0.01839749, 0.01681143, 0.01306726, 0.01229862,
0.0154797 , 0.01722162, 0.01886575, 0.01710777, 0.01939611,
0.0194152 , 0.01726162, 0.01673203, 0.00799087, 0.00883617,
0.00904375, 0.01135615, 0.01073662, 0.02285338, 0.0273388 ,
0.03929255],
[0.01839749, 0.01681143, 0.01306726, 0.01229862, 0.0154797 ,
0.01722162, 0.01886575, 0.01710777, 0.01939611, 0.0194152 ,
0.01726162, 0.01673203, 0.00799087, 0.00883617, 0.00904375,
0.01135615, 0.01073662, 0.02285338, 0.0273388 , 0.03929255,
0.04112336],
[0.01681143, 0.01306726, 0.01229862, 0.0154797 , 0.01722162,
0.01886575, 0.01710777, 0.01939611, 0.0194152 , 0.01726162,
0.01673203, 0.00799087, 0.00883617, 0.00904375, 0.01135615,
0.01073662, 0.02285338, 0.0273388 , 0.03929255, 0.04112336,
0.03795584],
[0.01306726, 0.01229862, 0.0154797 , 0.01722162, 0.01886575,
0.01710777, 0.01939611, 0.0194152 , 0.01726162, 0.01673203,
0.00799087, 0.00883617, 0.00904375, 0.01135615, 0.01073662,
0.02285338, 0.0273388 , 0.03929255, 0.04112336, 0.03795584,
0.04029001]])
Horizonte de 28 días (\(\tau=28\))
# Build the sliding-window datasets for 7-day volatility with horizon tau28,
# then report the shape of every split.
vola28_splits = create_time_series_datasets(df_1_st, 'Volatilidad_7', tau28)
X_train_vola28, y_train_vola28, X_val_vola28, y_val_vola28, X_test_vola28, y_test_vola28 = vola28_splits
# Print the dimensions of each array (same output as printing one by one).
for split_name, split_arr in zip(
    ("X_train", "y_train", "X_val", "y_val", "X_test", "y_test"), vola28_splits
):
    print(f"{split_name} shape:", split_arr.shape)
X_train shape: (4887, 28)
y_train shape: (4887, 1)
X_val shape: (28, 28)
y_val shape: (28, 1)
X_test shape: (28, 28)
y_test shape: (28, 1)
A continuación se puede visualizar el arreglo para cada una de las matrices generadas para los datos de entrenamiento, validación y prueba para un \(\tau = 28\).
X_train_vola28
array([[0. , 0. , 0. , ..., 0. , 0. ,
0. ],
[0. , 0. , 0. , ..., 0. , 0. ,
0. ],
[0. , 0. , 0. , ..., 0. , 0. ,
0. ],
...,
[0.01249856, 0.0138433 , 0.01222517, ..., 0.01926133, 0.01637862,
0.01765051],
[0.0138433 , 0.01222517, 0.01787648, ..., 0.01637862, 0.01765051,
0.01760648],
[0.01222517, 0.01787648, 0.02157085, ..., 0.01765051, 0.01760648,
0.01784617]])
y_train_vola28
array([[0. ],
[0. ],
[0. ],
...,
[0.01760648],
[0.01784617],
[0.0172682 ]])
X_val_vola28
array([[0.01787648, 0.02157085, 0.02298662, 0.02443874, 0.02431325,
0.02676837, 0.02725207, 0.03289862, 0.02447018, 0.02922022,
0.0290243 , 0.02868688, 0.02918397, 0.0296771 , 0.02290491,
0.0235329 , 0.02343 , 0.02344032, 0.01989328, 0.02045367,
0.01950276, 0.01660156, 0.01926133, 0.01637862, 0.01765051,
0.01760648, 0.01784617, 0.0172682 ],
[0.02157085, 0.02298662, 0.02443874, 0.02431325, 0.02676837,
0.02725207, 0.03289862, 0.02447018, 0.02922022, 0.0290243 ,
0.02868688, 0.02918397, 0.0296771 , 0.02290491, 0.0235329 ,
0.02343 , 0.02344032, 0.01989328, 0.02045367, 0.01950276,
0.01660156, 0.01926133, 0.01637862, 0.01765051, 0.01760648,
0.01784617, 0.0172682 , 0.02463123],
[0.02298662, 0.02443874, 0.02431325, 0.02676837, 0.02725207,
0.03289862, 0.02447018, 0.02922022, 0.0290243 , 0.02868688,
0.02918397, 0.0296771 , 0.02290491, 0.0235329 , 0.02343 ,
0.02344032, 0.01989328, 0.02045367, 0.01950276, 0.01660156,
0.01926133, 0.01637862, 0.01765051, 0.01760648, 0.01784617,
0.0172682 , 0.02463123, 0.02200521],
[0.02443874, 0.02431325, 0.02676837, 0.02725207, 0.03289862,
0.02447018, 0.02922022, 0.0290243 , 0.02868688, 0.02918397,
0.0296771 , 0.02290491, 0.0235329 , 0.02343 , 0.02344032,
0.01989328, 0.02045367, 0.01950276, 0.01660156, 0.01926133,
0.01637862, 0.01765051, 0.01760648, 0.01784617, 0.0172682 ,
0.02463123, 0.02200521, 0.02907347],
[0.02431325, 0.02676837, 0.02725207, 0.03289862, 0.02447018,
0.02922022, 0.0290243 , 0.02868688, 0.02918397, 0.0296771 ,
0.02290491, 0.0235329 , 0.02343 , 0.02344032, 0.01989328,
0.02045367, 0.01950276, 0.01660156, 0.01926133, 0.01637862,
0.01765051, 0.01760648, 0.01784617, 0.0172682 , 0.02463123,
0.02200521, 0.02907347, 0.03007176],
[0.02676837, 0.02725207, 0.03289862, 0.02447018, 0.02922022,
0.0290243 , 0.02868688, 0.02918397, 0.0296771 , 0.02290491,
0.0235329 , 0.02343 , 0.02344032, 0.01989328, 0.02045367,
0.01950276, 0.01660156, 0.01926133, 0.01637862, 0.01765051,
0.01760648, 0.01784617, 0.0172682 , 0.02463123, 0.02200521,
0.02907347, 0.03007176, 0.02923789],
[0.02725207, 0.03289862, 0.02447018, 0.02922022, 0.0290243 ,
0.02868688, 0.02918397, 0.0296771 , 0.02290491, 0.0235329 ,
0.02343 , 0.02344032, 0.01989328, 0.02045367, 0.01950276,
0.01660156, 0.01926133, 0.01637862, 0.01765051, 0.01760648,
0.01784617, 0.0172682 , 0.02463123, 0.02200521, 0.02907347,
0.03007176, 0.02923789, 0.02951017],
[0.03289862, 0.02447018, 0.02922022, 0.0290243 , 0.02868688,
0.02918397, 0.0296771 , 0.02290491, 0.0235329 , 0.02343 ,
0.02344032, 0.01989328, 0.02045367, 0.01950276, 0.01660156,
0.01926133, 0.01637862, 0.01765051, 0.01760648, 0.01784617,
0.0172682 , 0.02463123, 0.02200521, 0.02907347, 0.03007176,
0.02923789, 0.02951017, 0.0296276 ],
[0.02447018, 0.02922022, 0.0290243 , 0.02868688, 0.02918397,
0.0296771 , 0.02290491, 0.0235329 , 0.02343 , 0.02344032,
0.01989328, 0.02045367, 0.01950276, 0.01660156, 0.01926133,
0.01637862, 0.01765051, 0.01760648, 0.01784617, 0.0172682 ,
0.02463123, 0.02200521, 0.02907347, 0.03007176, 0.02923789,
0.02951017, 0.0296276 , 0.03567059],
[0.02922022, 0.0290243 , 0.02868688, 0.02918397, 0.0296771 ,
0.02290491, 0.0235329 , 0.02343 , 0.02344032, 0.01989328,
0.02045367, 0.01950276, 0.01660156, 0.01926133, 0.01637862,
0.01765051, 0.01760648, 0.01784617, 0.0172682 , 0.02463123,
0.02200521, 0.02907347, 0.03007176, 0.02923789, 0.02951017,
0.0296276 , 0.03567059, 0.03680559],
[0.0290243 , 0.02868688, 0.02918397, 0.0296771 , 0.02290491,
0.0235329 , 0.02343 , 0.02344032, 0.01989328, 0.02045367,
0.01950276, 0.01660156, 0.01926133, 0.01637862, 0.01765051,
0.01760648, 0.01784617, 0.0172682 , 0.02463123, 0.02200521,
0.02907347, 0.03007176, 0.02923789, 0.02951017, 0.0296276 ,
0.03567059, 0.03680559, 0.0290801 ],
[0.02868688, 0.02918397, 0.0296771 , 0.02290491, 0.0235329 ,
0.02343 , 0.02344032, 0.01989328, 0.02045367, 0.01950276,
0.01660156, 0.01926133, 0.01637862, 0.01765051, 0.01760648,
0.01784617, 0.0172682 , 0.02463123, 0.02200521, 0.02907347,
0.03007176, 0.02923789, 0.02951017, 0.0296276 , 0.03567059,
0.03680559, 0.0290801 , 0.02854897],
[0.02918397, 0.0296771 , 0.02290491, 0.0235329 , 0.02343 ,
0.02344032, 0.01989328, 0.02045367, 0.01950276, 0.01660156,
0.01926133, 0.01637862, 0.01765051, 0.01760648, 0.01784617,
0.0172682 , 0.02463123, 0.02200521, 0.02907347, 0.03007176,
0.02923789, 0.02951017, 0.0296276 , 0.03567059, 0.03680559,
0.0290801 , 0.02854897, 0.04265354],
[0.0296771 , 0.02290491, 0.0235329 , 0.02343 , 0.02344032,
0.01989328, 0.02045367, 0.01950276, 0.01660156, 0.01926133,
0.01637862, 0.01765051, 0.01760648, 0.01784617, 0.0172682 ,
0.02463123, 0.02200521, 0.02907347, 0.03007176, 0.02923789,
0.02951017, 0.0296276 , 0.03567059, 0.03680559, 0.0290801 ,
0.02854897, 0.04265354, 0.04267626],
[0.02290491, 0.0235329 , 0.02343 , 0.02344032, 0.01989328,
0.02045367, 0.01950276, 0.01660156, 0.01926133, 0.01637862,
0.01765051, 0.01760648, 0.01784617, 0.0172682 , 0.02463123,
0.02200521, 0.02907347, 0.03007176, 0.02923789, 0.02951017,
0.0296276 , 0.03567059, 0.03680559, 0.0290801 , 0.02854897,
0.04265354, 0.04267626, 0.04351283],
[0.0235329 , 0.02343 , 0.02344032, 0.01989328, 0.02045367,
0.01950276, 0.01660156, 0.01926133, 0.01637862, 0.01765051,
0.01760648, 0.01784617, 0.0172682 , 0.02463123, 0.02200521,
0.02907347, 0.03007176, 0.02923789, 0.02951017, 0.0296276 ,
0.03567059, 0.03680559, 0.0290801 , 0.02854897, 0.04265354,
0.04267626, 0.04351283, 0.03135964],
[0.02343 , 0.02344032, 0.01989328, 0.02045367, 0.01950276,
0.01660156, 0.01926133, 0.01637862, 0.01765051, 0.01760648,
0.01784617, 0.0172682 , 0.02463123, 0.02200521, 0.02907347,
0.03007176, 0.02923789, 0.02951017, 0.0296276 , 0.03567059,
0.03680559, 0.0290801 , 0.02854897, 0.04265354, 0.04267626,
0.04351283, 0.03135964, 0.03304242],
[0.02344032, 0.01989328, 0.02045367, 0.01950276, 0.01660156,
0.01926133, 0.01637862, 0.01765051, 0.01760648, 0.01784617,
0.0172682 , 0.02463123, 0.02200521, 0.02907347, 0.03007176,
0.02923789, 0.02951017, 0.0296276 , 0.03567059, 0.03680559,
0.0290801 , 0.02854897, 0.04265354, 0.04267626, 0.04351283,
0.03135964, 0.03304242, 0.03188898],
[0.01989328, 0.02045367, 0.01950276, 0.01660156, 0.01926133,
0.01637862, 0.01765051, 0.01760648, 0.01784617, 0.0172682 ,
0.02463123, 0.02200521, 0.02907347, 0.03007176, 0.02923789,
0.02951017, 0.0296276 , 0.03567059, 0.03680559, 0.0290801 ,
0.02854897, 0.04265354, 0.04267626, 0.04351283, 0.03135964,
0.03304242, 0.03188898, 0.03283039],
[0.02045367, 0.01950276, 0.01660156, 0.01926133, 0.01637862,
0.01765051, 0.01760648, 0.01784617, 0.0172682 , 0.02463123,
0.02200521, 0.02907347, 0.03007176, 0.02923789, 0.02951017,
0.0296276 , 0.03567059, 0.03680559, 0.0290801 , 0.02854897,
0.04265354, 0.04267626, 0.04351283, 0.03135964, 0.03304242,
0.03188898, 0.03283039, 0.02025799],
[0.01950276, 0.01660156, 0.01926133, 0.01637862, 0.01765051,
0.01760648, 0.01784617, 0.0172682 , 0.02463123, 0.02200521,
0.02907347, 0.03007176, 0.02923789, 0.02951017, 0.0296276 ,
0.03567059, 0.03680559, 0.0290801 , 0.02854897, 0.04265354,
0.04267626, 0.04351283, 0.03135964, 0.03304242, 0.03188898,
0.03283039, 0.02025799, 0.02028695],
[0.01660156, 0.01926133, 0.01637862, 0.01765051, 0.01760648,
0.01784617, 0.0172682 , 0.02463123, 0.02200521, 0.02907347,
0.03007176, 0.02923789, 0.02951017, 0.0296276 , 0.03567059,
0.03680559, 0.0290801 , 0.02854897, 0.04265354, 0.04267626,
0.04351283, 0.03135964, 0.03304242, 0.03188898, 0.03283039,
0.02025799, 0.02028695, 0.01782821],
[0.01926133, 0.01637862, 0.01765051, 0.01760648, 0.01784617,
0.0172682 , 0.02463123, 0.02200521, 0.02907347, 0.03007176,
0.02923789, 0.02951017, 0.0296276 , 0.03567059, 0.03680559,
0.0290801 , 0.02854897, 0.04265354, 0.04267626, 0.04351283,
0.03135964, 0.03304242, 0.03188898, 0.03283039, 0.02025799,
0.02028695, 0.01782821, 0.02326837],
[0.01637862, 0.01765051, 0.01760648, 0.01784617, 0.0172682 ,
0.02463123, 0.02200521, 0.02907347, 0.03007176, 0.02923789,
0.02951017, 0.0296276 , 0.03567059, 0.03680559, 0.0290801 ,
0.02854897, 0.04265354, 0.04267626, 0.04351283, 0.03135964,
0.03304242, 0.03188898, 0.03283039, 0.02025799, 0.02028695,
0.01782821, 0.02326837, 0.02220903],
[0.01765051, 0.01760648, 0.01784617, 0.0172682 , 0.02463123,
0.02200521, 0.02907347, 0.03007176, 0.02923789, 0.02951017,
0.0296276 , 0.03567059, 0.03680559, 0.0290801 , 0.02854897,
0.04265354, 0.04267626, 0.04351283, 0.03135964, 0.03304242,
0.03188898, 0.03283039, 0.02025799, 0.02028695, 0.01782821,
0.02326837, 0.02220903, 0.02302156],
[0.01760648, 0.01784617, 0.0172682 , 0.02463123, 0.02200521,
0.02907347, 0.03007176, 0.02923789, 0.02951017, 0.0296276 ,
0.03567059, 0.03680559, 0.0290801 , 0.02854897, 0.04265354,
0.04267626, 0.04351283, 0.03135964, 0.03304242, 0.03188898,
0.03283039, 0.02025799, 0.02028695, 0.01782821, 0.02326837,
0.02220903, 0.02302156, 0.02008411],
[0.01784617, 0.0172682 , 0.02463123, 0.02200521, 0.02907347,
0.03007176, 0.02923789, 0.02951017, 0.0296276 , 0.03567059,
0.03680559, 0.0290801 , 0.02854897, 0.04265354, 0.04267626,
0.04351283, 0.03135964, 0.03304242, 0.03188898, 0.03283039,
0.02025799, 0.02028695, 0.01782821, 0.02326837, 0.02220903,
0.02302156, 0.02008411, 0.02796862],
[0.0172682 , 0.02463123, 0.02200521, 0.02907347, 0.03007176,
0.02923789, 0.02951017, 0.0296276 , 0.03567059, 0.03680559,
0.0290801 , 0.02854897, 0.04265354, 0.04267626, 0.04351283,
0.03135964, 0.03304242, 0.03188898, 0.03283039, 0.02025799,
0.02028695, 0.01782821, 0.02326837, 0.02220903, 0.02302156,
0.02008411, 0.02796862, 0.02807775]])
y_val_vola28
array([[0.02463123],
[0.02200521],
[0.02907347],
[0.03007176],
[0.02923789],
[0.02951017],
[0.0296276 ],
[0.03567059],
[0.03680559],
[0.0290801 ],
[0.02854897],
[0.04265354],
[0.04267626],
[0.04351283],
[0.03135964],
[0.03304242],
[0.03188898],
[0.03283039],
[0.02025799],
[0.02028695],
[0.01782821],
[0.02326837],
[0.02220903],
[0.02302156],
[0.02008411],
[0.02796862],
[0.02807775],
[0.0280639 ]])
X_test_vola28
array([[0.02463123, 0.02200521, 0.02907347, 0.03007176, 0.02923789,
0.02951017, 0.0296276 , 0.03567059, 0.03680559, 0.0290801 ,
0.02854897, 0.04265354, 0.04267626, 0.04351283, 0.03135964,
0.03304242, 0.03188898, 0.03283039, 0.02025799, 0.02028695,
0.01782821, 0.02326837, 0.02220903, 0.02302156, 0.02008411,
0.02796862, 0.02807775, 0.0280639 ],
[0.02200521, 0.02907347, 0.03007176, 0.02923789, 0.02951017,
0.0296276 , 0.03567059, 0.03680559, 0.0290801 , 0.02854897,
0.04265354, 0.04267626, 0.04351283, 0.03135964, 0.03304242,
0.03188898, 0.03283039, 0.02025799, 0.02028695, 0.01782821,
0.02326837, 0.02220903, 0.02302156, 0.02008411, 0.02796862,
0.02807775, 0.0280639 , 0.01860246],
[0.02907347, 0.03007176, 0.02923789, 0.02951017, 0.0296276 ,
0.03567059, 0.03680559, 0.0290801 , 0.02854897, 0.04265354,
0.04267626, 0.04351283, 0.03135964, 0.03304242, 0.03188898,
0.03283039, 0.02025799, 0.02028695, 0.01782821, 0.02326837,
0.02220903, 0.02302156, 0.02008411, 0.02796862, 0.02807775,
0.0280639 , 0.01860246, 0.02028465],
[0.03007176, 0.02923789, 0.02951017, 0.0296276 , 0.03567059,
0.03680559, 0.0290801 , 0.02854897, 0.04265354, 0.04267626,
0.04351283, 0.03135964, 0.03304242, 0.03188898, 0.03283039,
0.02025799, 0.02028695, 0.01782821, 0.02326837, 0.02220903,
0.02302156, 0.02008411, 0.02796862, 0.02807775, 0.0280639 ,
0.01860246, 0.02028465, 0.02152959],
[0.02923789, 0.02951017, 0.0296276 , 0.03567059, 0.03680559,
0.0290801 , 0.02854897, 0.04265354, 0.04267626, 0.04351283,
0.03135964, 0.03304242, 0.03188898, 0.03283039, 0.02025799,
0.02028695, 0.01782821, 0.02326837, 0.02220903, 0.02302156,
0.02008411, 0.02796862, 0.02807775, 0.0280639 , 0.01860246,
0.02028465, 0.02152959, 0.0208019 ],
[0.02951017, 0.0296276 , 0.03567059, 0.03680559, 0.0290801 ,
0.02854897, 0.04265354, 0.04267626, 0.04351283, 0.03135964,
0.03304242, 0.03188898, 0.03283039, 0.02025799, 0.02028695,
0.01782821, 0.02326837, 0.02220903, 0.02302156, 0.02008411,
0.02796862, 0.02807775, 0.0280639 , 0.01860246, 0.02028465,
0.02152959, 0.0208019 , 0.01353053],
[0.0296276 , 0.03567059, 0.03680559, 0.0290801 , 0.02854897,
0.04265354, 0.04267626, 0.04351283, 0.03135964, 0.03304242,
0.03188898, 0.03283039, 0.02025799, 0.02028695, 0.01782821,
0.02326837, 0.02220903, 0.02302156, 0.02008411, 0.02796862,
0.02807775, 0.0280639 , 0.01860246, 0.02028465, 0.02152959,
0.0208019 , 0.01353053, 0.01387194],
[0.03567059, 0.03680559, 0.0290801 , 0.02854897, 0.04265354,
0.04267626, 0.04351283, 0.03135964, 0.03304242, 0.03188898,
0.03283039, 0.02025799, 0.02028695, 0.01782821, 0.02326837,
0.02220903, 0.02302156, 0.02008411, 0.02796862, 0.02807775,
0.0280639 , 0.01860246, 0.02028465, 0.02152959, 0.0208019 ,
0.01353053, 0.01387194, 0.01464025],
[0.03680559, 0.0290801 , 0.02854897, 0.04265354, 0.04267626,
0.04351283, 0.03135964, 0.03304242, 0.03188898, 0.03283039,
0.02025799, 0.02028695, 0.01782821, 0.02326837, 0.02220903,
0.02302156, 0.02008411, 0.02796862, 0.02807775, 0.0280639 ,
0.01860246, 0.02028465, 0.02152959, 0.0208019 , 0.01353053,
0.01387194, 0.01464025, 0.00797008],
[0.0290801 , 0.02854897, 0.04265354, 0.04267626, 0.04351283,
0.03135964, 0.03304242, 0.03188898, 0.03283039, 0.02025799,
0.02028695, 0.01782821, 0.02326837, 0.02220903, 0.02302156,
0.02008411, 0.02796862, 0.02807775, 0.0280639 , 0.01860246,
0.02028465, 0.02152959, 0.0208019 , 0.01353053, 0.01387194,
0.01464025, 0.00797008, 0.00840223],
[0.02854897, 0.04265354, 0.04267626, 0.04351283, 0.03135964,
0.03304242, 0.03188898, 0.03283039, 0.02025799, 0.02028695,
0.01782821, 0.02326837, 0.02220903, 0.02302156, 0.02008411,
0.02796862, 0.02807775, 0.0280639 , 0.01860246, 0.02028465,
0.02152959, 0.0208019 , 0.01353053, 0.01387194, 0.01464025,
0.00797008, 0.00840223, 0.01262426],
[0.04265354, 0.04267626, 0.04351283, 0.03135964, 0.03304242,
0.03188898, 0.03283039, 0.02025799, 0.02028695, 0.01782821,
0.02326837, 0.02220903, 0.02302156, 0.02008411, 0.02796862,
0.02807775, 0.0280639 , 0.01860246, 0.02028465, 0.02152959,
0.0208019 , 0.01353053, 0.01387194, 0.01464025, 0.00797008,
0.00840223, 0.01262426, 0.0138589 ],
[0.04267626, 0.04351283, 0.03135964, 0.03304242, 0.03188898,
0.03283039, 0.02025799, 0.02028695, 0.01782821, 0.02326837,
0.02220903, 0.02302156, 0.02008411, 0.02796862, 0.02807775,
0.0280639 , 0.01860246, 0.02028465, 0.02152959, 0.0208019 ,
0.01353053, 0.01387194, 0.01464025, 0.00797008, 0.00840223,
0.01262426, 0.0138589 , 0.01839749],
[0.04351283, 0.03135964, 0.03304242, 0.03188898, 0.03283039,
0.02025799, 0.02028695, 0.01782821, 0.02326837, 0.02220903,
0.02302156, 0.02008411, 0.02796862, 0.02807775, 0.0280639 ,
0.01860246, 0.02028465, 0.02152959, 0.0208019 , 0.01353053,
0.01387194, 0.01464025, 0.00797008, 0.00840223, 0.01262426,
0.0138589 , 0.01839749, 0.01681143],
[0.03135964, 0.03304242, 0.03188898, 0.03283039, 0.02025799,
0.02028695, 0.01782821, 0.02326837, 0.02220903, 0.02302156,
0.02008411, 0.02796862, 0.02807775, 0.0280639 , 0.01860246,
0.02028465, 0.02152959, 0.0208019 , 0.01353053, 0.01387194,
0.01464025, 0.00797008, 0.00840223, 0.01262426, 0.0138589 ,
0.01839749, 0.01681143, 0.01306726],
[0.03304242, 0.03188898, 0.03283039, 0.02025799, 0.02028695,
0.01782821, 0.02326837, 0.02220903, 0.02302156, 0.02008411,
0.02796862, 0.02807775, 0.0280639 , 0.01860246, 0.02028465,
0.02152959, 0.0208019 , 0.01353053, 0.01387194, 0.01464025,
0.00797008, 0.00840223, 0.01262426, 0.0138589 , 0.01839749,
0.01681143, 0.01306726, 0.01229862],
[0.03188898, 0.03283039, 0.02025799, 0.02028695, 0.01782821,
0.02326837, 0.02220903, 0.02302156, 0.02008411, 0.02796862,
0.02807775, 0.0280639 , 0.01860246, 0.02028465, 0.02152959,
0.0208019 , 0.01353053, 0.01387194, 0.01464025, 0.00797008,
0.00840223, 0.01262426, 0.0138589 , 0.01839749, 0.01681143,
0.01306726, 0.01229862, 0.0154797 ],
[0.03283039, 0.02025799, 0.02028695, 0.01782821, 0.02326837,
0.02220903, 0.02302156, 0.02008411, 0.02796862, 0.02807775,
0.0280639 , 0.01860246, 0.02028465, 0.02152959, 0.0208019 ,
0.01353053, 0.01387194, 0.01464025, 0.00797008, 0.00840223,
0.01262426, 0.0138589 , 0.01839749, 0.01681143, 0.01306726,
0.01229862, 0.0154797 , 0.01722162],
[0.02025799, 0.02028695, 0.01782821, 0.02326837, 0.02220903,
0.02302156, 0.02008411, 0.02796862, 0.02807775, 0.0280639 ,
0.01860246, 0.02028465, 0.02152959, 0.0208019 , 0.01353053,
0.01387194, 0.01464025, 0.00797008, 0.00840223, 0.01262426,
0.0138589 , 0.01839749, 0.01681143, 0.01306726, 0.01229862,
0.0154797 , 0.01722162, 0.01886575],
[0.02028695, 0.01782821, 0.02326837, 0.02220903, 0.02302156,
0.02008411, 0.02796862, 0.02807775, 0.0280639 , 0.01860246,
0.02028465, 0.02152959, 0.0208019 , 0.01353053, 0.01387194,
0.01464025, 0.00797008, 0.00840223, 0.01262426, 0.0138589 ,
0.01839749, 0.01681143, 0.01306726, 0.01229862, 0.0154797 ,
0.01722162, 0.01886575, 0.01710777],
[0.01782821, 0.02326837, 0.02220903, 0.02302156, 0.02008411,
0.02796862, 0.02807775, 0.0280639 , 0.01860246, 0.02028465,
0.02152959, 0.0208019 , 0.01353053, 0.01387194, 0.01464025,
0.00797008, 0.00840223, 0.01262426, 0.0138589 , 0.01839749,
0.01681143, 0.01306726, 0.01229862, 0.0154797 , 0.01722162,
0.01886575, 0.01710777, 0.01939611],
[0.02326837, 0.02220903, 0.02302156, 0.02008411, 0.02796862,
0.02807775, 0.0280639 , 0.01860246, 0.02028465, 0.02152959,
0.0208019 , 0.01353053, 0.01387194, 0.01464025, 0.00797008,
0.00840223, 0.01262426, 0.0138589 , 0.01839749, 0.01681143,
0.01306726, 0.01229862, 0.0154797 , 0.01722162, 0.01886575,
0.01710777, 0.01939611, 0.0194152 ],
[0.02220903, 0.02302156, 0.02008411, 0.02796862, 0.02807775,
0.0280639 , 0.01860246, 0.02028465, 0.02152959, 0.0208019 ,
0.01353053, 0.01387194, 0.01464025, 0.00797008, 0.00840223,
0.01262426, 0.0138589 , 0.01839749, 0.01681143, 0.01306726,
0.01229862, 0.0154797 , 0.01722162, 0.01886575, 0.01710777,
0.01939611, 0.0194152 , 0.01726162],
[0.02302156, 0.02008411, 0.02796862, 0.02807775, 0.0280639 ,
0.01860246, 0.02028465, 0.02152959, 0.0208019 , 0.01353053,
0.01387194, 0.01464025, 0.00797008, 0.00840223, 0.01262426,
0.0138589 , 0.01839749, 0.01681143, 0.01306726, 0.01229862,
0.0154797 , 0.01722162, 0.01886575, 0.01710777, 0.01939611,
0.0194152 , 0.01726162, 0.01673203],
[0.02008411, 0.02796862, 0.02807775, 0.0280639 , 0.01860246,
0.02028465, 0.02152959, 0.0208019 , 0.01353053, 0.01387194,
0.01464025, 0.00797008, 0.00840223, 0.01262426, 0.0138589 ,
0.01839749, 0.01681143, 0.01306726, 0.01229862, 0.0154797 ,
0.01722162, 0.01886575, 0.01710777, 0.01939611, 0.0194152 ,
0.01726162, 0.01673203, 0.00799087],
[0.02796862, 0.02807775, 0.0280639 , 0.01860246, 0.02028465,
0.02152959, 0.0208019 , 0.01353053, 0.01387194, 0.01464025,
0.00797008, 0.00840223, 0.01262426, 0.0138589 , 0.01839749,
0.01681143, 0.01306726, 0.01229862, 0.0154797 , 0.01722162,
0.01886575, 0.01710777, 0.01939611, 0.0194152 , 0.01726162,
0.01673203, 0.00799087, 0.00883617],
[0.02807775, 0.0280639 , 0.01860246, 0.02028465, 0.02152959,
0.0208019 , 0.01353053, 0.01387194, 0.01464025, 0.00797008,
0.00840223, 0.01262426, 0.0138589 , 0.01839749, 0.01681143,
0.01306726, 0.01229862, 0.0154797 , 0.01722162, 0.01886575,
0.01710777, 0.01939611, 0.0194152 , 0.01726162, 0.01673203,
0.00799087, 0.00883617, 0.00904375],
[0.0280639 , 0.01860246, 0.02028465, 0.02152959, 0.0208019 ,
0.01353053, 0.01387194, 0.01464025, 0.00797008, 0.00840223,
0.01262426, 0.0138589 , 0.01839749, 0.01681143, 0.01306726,
0.01229862, 0.0154797 , 0.01722162, 0.01886575, 0.01710777,
0.01939611, 0.0194152 , 0.01726162, 0.01673203, 0.00799087,
0.00883617, 0.00904375, 0.01135615]])
y_test_vola28
array([[0.01860246],
[0.02028465],
[0.02152959],
[0.0208019 ],
[0.01353053],
[0.01387194],
[0.01464025],
[0.00797008],
[0.00840223],
[0.01262426],
[0.0138589 ],
[0.01839749],
[0.01681143],
[0.01306726],
[0.01229862],
[0.0154797 ],
[0.01722162],
[0.01886575],
[0.01710777],
[0.01939611],
[0.0194152 ],
[0.01726162],
[0.01673203],
[0.00799087],
[0.00883617],
[0.00904375],
[0.01135615],
[0.01073662]])
\(\omega = 14\)
Horizonte de 7 días (\(\tau=7\))
# Build the train/validation/test splits for the 14-day volatility series
# with a 7-day horizon (tau = 7).
X_train_vola14_7, y_train_vola14_7, X_val_vola14_7, y_val_vola14_7, X_test_vola14_7, y_test_vola14_7 = create_time_series_datasets(df_1_st, 'Volatilidad_14', tau7)
# Report the dimensions of every split.
splits_14_7 = [
    ("X_train shape:", X_train_vola14_7),
    ("y_train shape:", y_train_vola14_7),
    ("X_val shape:", X_val_vola14_7),
    ("y_val shape:", y_val_vola14_7),
    ("X_test shape:", X_test_vola14_7),
    ("y_test shape:", y_test_vola14_7),
]
for etiqueta, arreglo in splits_14_7:
    print(etiqueta, arreglo.shape)
X_train shape: (4971, 7)
y_train shape: (4971, 1)
X_val shape: (7, 7)
y_val shape: (7, 1)
X_test shape: (7, 7)
y_test shape: (7, 1)
A continuación se puede visualizar el arreglo para cada una de las matrices generadas para los datos de entrenamiento, validación y prueba para un \(\tau = 7\).
X_train_vola14_7
array([[0. , 0. , 0. , ..., 0. , 0. ,
0. ],
[0. , 0. , 0. , ..., 0. , 0. ,
0. ],
[0. , 0. , 0. , ..., 0. , 0. ,
0. ],
...,
[0.01627171, 0.01640381, 0.0163457 , ..., 0.02197152, 0.03071454,
0.03197917],
[0.01640381, 0.0163457 , 0.01949109, ..., 0.03071454, 0.03197917,
0.03195915],
[0.0163457 , 0.01949109, 0.02197152, ..., 0.03197917, 0.03195915,
0.03180936]])
y_train_vola14_7
array([[0. ],
[0. ],
[0. ],
...,
[0.03195915],
[0.03180936],
[0.03180612]])
X_val_vola14_7
array([[0.01949109, 0.02197152, 0.03071454, 0.03197917, 0.03195915,
0.03180936, 0.03180612],
[0.02197152, 0.03071454, 0.03197917, 0.03195915, 0.03180936,
0.03180612, 0.03581382],
[0.03071454, 0.03197917, 0.03195915, 0.03180936, 0.03180612,
0.03581382, 0.04257136],
[0.03197917, 0.03195915, 0.03180936, 0.03180612, 0.03581382,
0.04257136, 0.04236052],
[0.03195915, 0.03180936, 0.03180612, 0.03581382, 0.04257136,
0.04236052, 0.04161598],
[0.03180936, 0.03180612, 0.03581382, 0.04257136, 0.04236052,
0.04161598, 0.04064398],
[0.03180612, 0.03581382, 0.04257136, 0.04236052, 0.04161598,
0.04064398, 0.04094935]])
y_val_vola14_7
array([[0.03581382],
[0.04257136],
[0.04236052],
[0.04161598],
[0.04064398],
[0.04094935],
[0.04077978]])
X_test_vola14_7
array([[0.03581382, 0.04257136, 0.04236052, 0.04161598, 0.04064398,
0.04094935, 0.04077978],
[0.04257136, 0.04236052, 0.04161598, 0.04064398, 0.04094935,
0.04077978, 0.04034229],
[0.04236052, 0.04161598, 0.04064398, 0.04094935, 0.04077978,
0.04034229, 0.04032241],
[0.04161598, 0.04064398, 0.04094935, 0.04077978, 0.04034229,
0.04032241, 0.03365452],
[0.04064398, 0.04094935, 0.04077978, 0.04034229, 0.04032241,
0.03365452, 0.03382242],
[0.04094935, 0.04077978, 0.04034229, 0.04032241, 0.03365452,
0.03382242, 0.03522428],
[0.04077978, 0.04034229, 0.04032241, 0.03365452, 0.03382242,
0.03522428, 0.03954966]])
y_test_vola14_7
array([[0.04034229],
[0.04032241],
[0.03365452],
[0.03382242],
[0.03522428],
[0.03954966],
[0.04104404]])
Horizonte de 14 días (\(\tau=14\))
# Build the train/validation/test splits for the 14-day volatility series
# with a 14-day horizon (tau = 14).
X_train_vola14_14, y_train_vola14_14, X_val_vola14_14, y_val_vola14_14, X_test_vola14_14, y_test_vola14_14 = create_time_series_datasets(df_1_st, 'Volatilidad_14', tau14)
# Report the dimensions of every split.
for etiqueta, arreglo in (
    ("X_train shape:", X_train_vola14_14),
    ("y_train shape:", y_train_vola14_14),
    ("X_val shape:", X_val_vola14_14),
    ("y_val shape:", y_val_vola14_14),
    ("X_test shape:", X_test_vola14_14),
    ("y_test shape:", y_test_vola14_14),
):
    print(etiqueta, arreglo.shape)
X_train shape: (4943, 14)
y_train shape: (4943, 1)
X_val shape: (14, 14)
y_val shape: (14, 1)
X_test shape: (14, 14)
y_test shape: (14, 1)
A continuación se puede visualizar el arreglo para cada una de las matrices generadas para los datos de entrenamiento, validación y prueba para un \(\tau = 14\).
X_train_vola14_14
array([[0. , 0. , 0. , ..., 0. , 0. ,
0. ],
[0. , 0. , 0. , ..., 0. , 0. ,
0. ],
[0. , 0. , 0. , ..., 0. , 0. ,
0. ],
...,
[0.02358944, 0.02370629, 0.02261878, ..., 0.01584093, 0.01702626,
0.01709612],
[0.02370629, 0.02261878, 0.02353375, ..., 0.01702626, 0.01709612,
0.01605132],
[0.02261878, 0.02353375, 0.02333393, ..., 0.01709612, 0.01605132,
0.01609356]])
y_train_vola14_14
array([[0. ],
[0. ],
[0. ],
...,
[0.01605132],
[0.01609356],
[0.01576487]])
X_val_vola14_14
array([[0.02353375, 0.02333393, 0.02332778, 0.02125759, 0.0212016 ,
0.02128622, 0.0215041 , 0.01582952, 0.01584093, 0.01702626,
0.01709612, 0.01605132, 0.01609356, 0.01576487],
[0.02333393, 0.02332778, 0.02125759, 0.0212016 , 0.02128622,
0.0215041 , 0.01582952, 0.01584093, 0.01702626, 0.01709612,
0.01605132, 0.01609356, 0.01576487, 0.01621947],
[0.02332778, 0.02125759, 0.0212016 , 0.02128622, 0.0215041 ,
0.01582952, 0.01584093, 0.01702626, 0.01709612, 0.01605132,
0.01609356, 0.01576487, 0.01621947, 0.01592497],
[0.02125759, 0.0212016 , 0.02128622, 0.0215041 , 0.01582952,
0.01584093, 0.01702626, 0.01709612, 0.01605132, 0.01609356,
0.01576487, 0.01621947, 0.01592497, 0.01688981],
[0.0212016 , 0.02128622, 0.0215041 , 0.01582952, 0.01584093,
0.01702626, 0.01709612, 0.01605132, 0.01609356, 0.01576487,
0.01621947, 0.01592497, 0.01688981, 0.01718203],
[0.02128622, 0.0215041 , 0.01582952, 0.01584093, 0.01702626,
0.01709612, 0.01605132, 0.01609356, 0.01576487, 0.01621947,
0.01592497, 0.01688981, 0.01718203, 0.01709837],
[0.0215041 , 0.01582952, 0.01584093, 0.01702626, 0.01709612,
0.01605132, 0.01609356, 0.01576487, 0.01621947, 0.01592497,
0.01688981, 0.01718203, 0.01709837, 0.0175492 ],
[0.01582952, 0.01584093, 0.01702626, 0.01709612, 0.01605132,
0.01609356, 0.01576487, 0.01621947, 0.01592497, 0.01688981,
0.01718203, 0.01709837, 0.0175492 , 0.01631064],
[0.01584093, 0.01702626, 0.01709612, 0.01605132, 0.01609356,
0.01576487, 0.01621947, 0.01592497, 0.01688981, 0.01718203,
0.01709837, 0.0175492 , 0.01631064, 0.01699268],
[0.01702626, 0.01709612, 0.01605132, 0.01609356, 0.01576487,
0.01621947, 0.01592497, 0.01688981, 0.01718203, 0.01709837,
0.0175492 , 0.01631064, 0.01699268, 0.01699004],
[0.01709612, 0.01605132, 0.01609356, 0.01576487, 0.01621947,
0.01592497, 0.01688981, 0.01718203, 0.01709837, 0.0175492 ,
0.01631064, 0.01699268, 0.01699004, 0.0173284 ],
[0.01605132, 0.01609356, 0.01576487, 0.01621947, 0.01592497,
0.01688981, 0.01718203, 0.01709837, 0.0175492 , 0.01631064,
0.01699268, 0.01699004, 0.0173284 , 0.01797874],
[0.01609356, 0.01576487, 0.01621947, 0.01592497, 0.01688981,
0.01718203, 0.01709837, 0.0175492 , 0.01631064, 0.01699268,
0.01699004, 0.0173284 , 0.01797874, 0.01627171],
[0.01576487, 0.01621947, 0.01592497, 0.01688981, 0.01718203,
0.01709837, 0.0175492 , 0.01631064, 0.01699268, 0.01699004,
0.0173284 , 0.01797874, 0.01627171, 0.01640381]])
y_val_vola14_14
array([[0.01621947],
[0.01592497],
[0.01688981],
[0.01718203],
[0.01709837],
[0.0175492 ],
[0.01631064],
[0.01699268],
[0.01699004],
[0.0173284 ],
[0.01797874],
[0.01627171],
[0.01640381],
[0.0163457 ]])
X_test_vola14_14
array([[0.01621947, 0.01592497, 0.01688981, 0.01718203, 0.01709837,
0.0175492 , 0.01631064, 0.01699268, 0.01699004, 0.0173284 ,
0.01797874, 0.01627171, 0.01640381, 0.0163457 ],
[0.01592497, 0.01688981, 0.01718203, 0.01709837, 0.0175492 ,
0.01631064, 0.01699268, 0.01699004, 0.0173284 , 0.01797874,
0.01627171, 0.01640381, 0.0163457 , 0.01949109],
[0.01688981, 0.01718203, 0.01709837, 0.0175492 , 0.01631064,
0.01699268, 0.01699004, 0.0173284 , 0.01797874, 0.01627171,
0.01640381, 0.0163457 , 0.01949109, 0.02197152],
[0.01718203, 0.01709837, 0.0175492 , 0.01631064, 0.01699268,
0.01699004, 0.0173284 , 0.01797874, 0.01627171, 0.01640381,
0.0163457 , 0.01949109, 0.02197152, 0.03071454],
[0.01709837, 0.0175492 , 0.01631064, 0.01699268, 0.01699004,
0.0173284 , 0.01797874, 0.01627171, 0.01640381, 0.0163457 ,
0.01949109, 0.02197152, 0.03071454, 0.03197917],
[0.0175492 , 0.01631064, 0.01699268, 0.01699004, 0.0173284 ,
0.01797874, 0.01627171, 0.01640381, 0.0163457 , 0.01949109,
0.02197152, 0.03071454, 0.03197917, 0.03195915],
[0.01631064, 0.01699268, 0.01699004, 0.0173284 , 0.01797874,
0.01627171, 0.01640381, 0.0163457 , 0.01949109, 0.02197152,
0.03071454, 0.03197917, 0.03195915, 0.03180936],
[0.01699268, 0.01699004, 0.0173284 , 0.01797874, 0.01627171,
0.01640381, 0.0163457 , 0.01949109, 0.02197152, 0.03071454,
0.03197917, 0.03195915, 0.03180936, 0.03180612],
[0.01699004, 0.0173284 , 0.01797874, 0.01627171, 0.01640381,
0.0163457 , 0.01949109, 0.02197152, 0.03071454, 0.03197917,
0.03195915, 0.03180936, 0.03180612, 0.03581382],
[0.0173284 , 0.01797874, 0.01627171, 0.01640381, 0.0163457 ,
0.01949109, 0.02197152, 0.03071454, 0.03197917, 0.03195915,
0.03180936, 0.03180612, 0.03581382, 0.04257136],
[0.01797874, 0.01627171, 0.01640381, 0.0163457 , 0.01949109,
0.02197152, 0.03071454, 0.03197917, 0.03195915, 0.03180936,
0.03180612, 0.03581382, 0.04257136, 0.04236052],
[0.01627171, 0.01640381, 0.0163457 , 0.01949109, 0.02197152,
0.03071454, 0.03197917, 0.03195915, 0.03180936, 0.03180612,
0.03581382, 0.04257136, 0.04236052, 0.04161598],
[0.01640381, 0.0163457 , 0.01949109, 0.02197152, 0.03071454,
0.03197917, 0.03195915, 0.03180936, 0.03180612, 0.03581382,
0.04257136, 0.04236052, 0.04161598, 0.04064398],
[0.0163457 , 0.01949109, 0.02197152, 0.03071454, 0.03197917,
0.03195915, 0.03180936, 0.03180612, 0.03581382, 0.04257136,
0.04236052, 0.04161598, 0.04064398, 0.04094935]])
y_test_vola14_14
array([[0.01949109],
[0.02197152],
[0.03071454],
[0.03197917],
[0.03195915],
[0.03180936],
[0.03180612],
[0.03581382],
[0.04257136],
[0.04236052],
[0.04161598],
[0.04064398],
[0.04094935],
[0.04077978]])
Horizonte de 21 días (\(\tau=21\))
# Build the train/validation/test splits for the 14-day volatility series
# with a 21-day horizon (tau = 21).
X_train_vola14_21, y_train_vola14_21, X_val_vola14_21, y_val_vola14_21, X_test_vola14_21, y_test_vola14_21 = create_time_series_datasets(df_1_st, 'Volatilidad_14', tau21)
# Pair each printed label with its array, then report every shape.
etiquetas_21 = ("X_train shape:", "y_train shape:", "X_val shape:",
                "y_val shape:", "X_test shape:", "y_test shape:")
arreglos_21 = (X_train_vola14_21, y_train_vola14_21, X_val_vola14_21,
               y_val_vola14_21, X_test_vola14_21, y_test_vola14_21)
for etiqueta, arreglo in zip(etiquetas_21, arreglos_21):
    print(etiqueta, arreglo.shape)
X_train shape: (4915, 21)
y_train shape: (4915, 1)
X_val shape: (21, 21)
y_val shape: (21, 1)
X_test shape: (21, 21)
y_test shape: (21, 1)
A continuación se puede visualizar el arreglo para cada una de las matrices generadas para los datos de entrenamiento, validación y prueba para un \(\tau = 21\).
X_train_vola14_21
array([[0. , 0. , 0. , ..., 0. , 0. ,
0. ],
[0. , 0. , 0. , ..., 0. , 0. ,
0. ],
[0. , 0. , 0. , ..., 0. , 0. ,
0. ],
...,
[0.01928971, 0.0191655 , 0.01816356, ..., 0.03430583, 0.03192901,
0.03190819],
[0.0191655 , 0.01816356, 0.02018555, ..., 0.03192901, 0.03190819,
0.03207988],
[0.01816356, 0.02018555, 0.0202123 , ..., 0.03190819, 0.03207988,
0.03210488]])
y_train_vola14_21
array([[0. ],
[0. ],
[0. ],
...,
[0.03207988],
[0.03210488],
[0.03209828]])
X_val_vola14_21
array([[0.02018555, 0.0202123 , 0.02267684, 0.02421744, 0.02421361,
0.02418457, 0.02367551, 0.02967658, 0.02920221, 0.02889174,
0.02818356, 0.0355796 , 0.03558185, 0.03632934, 0.03439932,
0.03430583, 0.03192901, 0.03190819, 0.03207988, 0.03210488,
0.03209828],
[0.0202123 , 0.02267684, 0.02421744, 0.02421361, 0.02418457,
0.02367551, 0.02967658, 0.02920221, 0.02889174, 0.02818356,
0.0355796 , 0.03558185, 0.03632934, 0.03439932, 0.03430583,
0.03192901, 0.03190819, 0.03207988, 0.03210488, 0.03209828,
0.02659769],
[0.02267684, 0.02421744, 0.02421361, 0.02418457, 0.02367551,
0.02967658, 0.02920221, 0.02889174, 0.02818356, 0.0355796 ,
0.03558185, 0.03632934, 0.03439932, 0.03430583, 0.03192901,
0.03190819, 0.03207988, 0.03210488, 0.03209828, 0.02659769,
0.02706552],
[0.02421744, 0.02421361, 0.02418457, 0.02367551, 0.02967658,
0.02920221, 0.02889174, 0.02818356, 0.0355796 , 0.03558185,
0.03632934, 0.03439932, 0.03430583, 0.03192901, 0.03190819,
0.03207988, 0.03210488, 0.03209828, 0.02659769, 0.02706552,
0.02676048],
[0.02421361, 0.02418457, 0.02367551, 0.02967658, 0.02920221,
0.02889174, 0.02818356, 0.0355796 , 0.03558185, 0.03632934,
0.03439932, 0.03430583, 0.03192901, 0.03190819, 0.03207988,
0.03210488, 0.03209828, 0.02659769, 0.02706552, 0.02676048,
0.02679602],
[0.02418457, 0.02367551, 0.02967658, 0.02920221, 0.02889174,
0.02818356, 0.0355796 , 0.03558185, 0.03632934, 0.03439932,
0.03430583, 0.03192901, 0.03190819, 0.03207988, 0.03210488,
0.03209828, 0.02659769, 0.02706552, 0.02676048, 0.02679602,
0.02358944],
[0.02367551, 0.02967658, 0.02920221, 0.02889174, 0.02818356,
0.0355796 , 0.03558185, 0.03632934, 0.03439932, 0.03430583,
0.03192901, 0.03190819, 0.03207988, 0.03210488, 0.03209828,
0.02659769, 0.02706552, 0.02676048, 0.02679602, 0.02358944,
0.02370629],
[0.02967658, 0.02920221, 0.02889174, 0.02818356, 0.0355796 ,
0.03558185, 0.03632934, 0.03439932, 0.03430583, 0.03192901,
0.03190819, 0.03207988, 0.03210488, 0.03209828, 0.02659769,
0.02706552, 0.02676048, 0.02679602, 0.02358944, 0.02370629,
0.02261878],
[0.02920221, 0.02889174, 0.02818356, 0.0355796 , 0.03558185,
0.03632934, 0.03439932, 0.03430583, 0.03192901, 0.03190819,
0.03207988, 0.03210488, 0.03209828, 0.02659769, 0.02706552,
0.02676048, 0.02679602, 0.02358944, 0.02370629, 0.02261878,
0.02353375],
[0.02889174, 0.02818356, 0.0355796 , 0.03558185, 0.03632934,
0.03439932, 0.03430583, 0.03192901, 0.03190819, 0.03207988,
0.03210488, 0.03209828, 0.02659769, 0.02706552, 0.02676048,
0.02679602, 0.02358944, 0.02370629, 0.02261878, 0.02353375,
0.02333393],
[0.02818356, 0.0355796 , 0.03558185, 0.03632934, 0.03439932,
0.03430583, 0.03192901, 0.03190819, 0.03207988, 0.03210488,
0.03209828, 0.02659769, 0.02706552, 0.02676048, 0.02679602,
0.02358944, 0.02370629, 0.02261878, 0.02353375, 0.02333393,
0.02332778],
[0.0355796 , 0.03558185, 0.03632934, 0.03439932, 0.03430583,
0.03192901, 0.03190819, 0.03207988, 0.03210488, 0.03209828,
0.02659769, 0.02706552, 0.02676048, 0.02679602, 0.02358944,
0.02370629, 0.02261878, 0.02353375, 0.02333393, 0.02332778,
0.02125759],
[0.03558185, 0.03632934, 0.03439932, 0.03430583, 0.03192901,
0.03190819, 0.03207988, 0.03210488, 0.03209828, 0.02659769,
0.02706552, 0.02676048, 0.02679602, 0.02358944, 0.02370629,
0.02261878, 0.02353375, 0.02333393, 0.02332778, 0.02125759,
0.0212016 ],
[0.03632934, 0.03439932, 0.03430583, 0.03192901, 0.03190819,
0.03207988, 0.03210488, 0.03209828, 0.02659769, 0.02706552,
0.02676048, 0.02679602, 0.02358944, 0.02370629, 0.02261878,
0.02353375, 0.02333393, 0.02332778, 0.02125759, 0.0212016 ,
0.02128622],
[0.03439932, 0.03430583, 0.03192901, 0.03190819, 0.03207988,
0.03210488, 0.03209828, 0.02659769, 0.02706552, 0.02676048,
0.02679602, 0.02358944, 0.02370629, 0.02261878, 0.02353375,
0.02333393, 0.02332778, 0.02125759, 0.0212016 , 0.02128622,
0.0215041 ],
[0.03430583, 0.03192901, 0.03190819, 0.03207988, 0.03210488,
0.03209828, 0.02659769, 0.02706552, 0.02676048, 0.02679602,
0.02358944, 0.02370629, 0.02261878, 0.02353375, 0.02333393,
0.02332778, 0.02125759, 0.0212016 , 0.02128622, 0.0215041 ,
0.01582952],
[0.03192901, 0.03190819, 0.03207988, 0.03210488, 0.03209828,
0.02659769, 0.02706552, 0.02676048, 0.02679602, 0.02358944,
0.02370629, 0.02261878, 0.02353375, 0.02333393, 0.02332778,
0.02125759, 0.0212016 , 0.02128622, 0.0215041 , 0.01582952,
0.01584093],
[0.03190819, 0.03207988, 0.03210488, 0.03209828, 0.02659769,
0.02706552, 0.02676048, 0.02679602, 0.02358944, 0.02370629,
0.02261878, 0.02353375, 0.02333393, 0.02332778, 0.02125759,
0.0212016 , 0.02128622, 0.0215041 , 0.01582952, 0.01584093,
0.01702626],
[0.03207988, 0.03210488, 0.03209828, 0.02659769, 0.02706552,
0.02676048, 0.02679602, 0.02358944, 0.02370629, 0.02261878,
0.02353375, 0.02333393, 0.02332778, 0.02125759, 0.0212016 ,
0.02128622, 0.0215041 , 0.01582952, 0.01584093, 0.01702626,
0.01709612],
[0.03210488, 0.03209828, 0.02659769, 0.02706552, 0.02676048,
0.02679602, 0.02358944, 0.02370629, 0.02261878, 0.02353375,
0.02333393, 0.02332778, 0.02125759, 0.0212016 , 0.02128622,
0.0215041 , 0.01582952, 0.01584093, 0.01702626, 0.01709612,
0.01605132],
[0.03209828, 0.02659769, 0.02706552, 0.02676048, 0.02679602,
0.02358944, 0.02370629, 0.02261878, 0.02353375, 0.02333393,
0.02332778, 0.02125759, 0.0212016 , 0.02128622, 0.0215041 ,
0.01582952, 0.01584093, 0.01702626, 0.01709612, 0.01605132,
0.01609356]])
y_val_vola14_21
array([[0.02659769],
[0.02706552],
[0.02676048],
[0.02679602],
[0.02358944],
[0.02370629],
[0.02261878],
[0.02353375],
[0.02333393],
[0.02332778],
[0.02125759],
[0.0212016 ],
[0.02128622],
[0.0215041 ],
[0.01582952],
[0.01584093],
[0.01702626],
[0.01709612],
[0.01605132],
[0.01609356],
[0.01576487]])
X_test_vola14_21
array([[0.02659769, 0.02706552, 0.02676048, 0.02679602, 0.02358944,
0.02370629, 0.02261878, 0.02353375, 0.02333393, 0.02332778,
0.02125759, 0.0212016 , 0.02128622, 0.0215041 , 0.01582952,
0.01584093, 0.01702626, 0.01709612, 0.01605132, 0.01609356,
0.01576487],
[0.02706552, 0.02676048, 0.02679602, 0.02358944, 0.02370629,
0.02261878, 0.02353375, 0.02333393, 0.02332778, 0.02125759,
0.0212016 , 0.02128622, 0.0215041 , 0.01582952, 0.01584093,
0.01702626, 0.01709612, 0.01605132, 0.01609356, 0.01576487,
0.01621947],
[0.02676048, 0.02679602, 0.02358944, 0.02370629, 0.02261878,
0.02353375, 0.02333393, 0.02332778, 0.02125759, 0.0212016 ,
0.02128622, 0.0215041 , 0.01582952, 0.01584093, 0.01702626,
0.01709612, 0.01605132, 0.01609356, 0.01576487, 0.01621947,
0.01592497],
[0.02679602, 0.02358944, 0.02370629, 0.02261878, 0.02353375,
0.02333393, 0.02332778, 0.02125759, 0.0212016 , 0.02128622,
0.0215041 , 0.01582952, 0.01584093, 0.01702626, 0.01709612,
0.01605132, 0.01609356, 0.01576487, 0.01621947, 0.01592497,
0.01688981],
[0.02358944, 0.02370629, 0.02261878, 0.02353375, 0.02333393,
0.02332778, 0.02125759, 0.0212016 , 0.02128622, 0.0215041 ,
0.01582952, 0.01584093, 0.01702626, 0.01709612, 0.01605132,
0.01609356, 0.01576487, 0.01621947, 0.01592497, 0.01688981,
0.01718203],
[0.02370629, 0.02261878, 0.02353375, 0.02333393, 0.02332778,
0.02125759, 0.0212016 , 0.02128622, 0.0215041 , 0.01582952,
0.01584093, 0.01702626, 0.01709612, 0.01605132, 0.01609356,
0.01576487, 0.01621947, 0.01592497, 0.01688981, 0.01718203,
0.01709837],
[0.02261878, 0.02353375, 0.02333393, 0.02332778, 0.02125759,
0.0212016 , 0.02128622, 0.0215041 , 0.01582952, 0.01584093,
0.01702626, 0.01709612, 0.01605132, 0.01609356, 0.01576487,
0.01621947, 0.01592497, 0.01688981, 0.01718203, 0.01709837,
0.0175492 ],
[0.02353375, 0.02333393, 0.02332778, 0.02125759, 0.0212016 ,
0.02128622, 0.0215041 , 0.01582952, 0.01584093, 0.01702626,
0.01709612, 0.01605132, 0.01609356, 0.01576487, 0.01621947,
0.01592497, 0.01688981, 0.01718203, 0.01709837, 0.0175492 ,
0.01631064],
[0.02333393, 0.02332778, 0.02125759, 0.0212016 , 0.02128622,
0.0215041 , 0.01582952, 0.01584093, 0.01702626, 0.01709612,
0.01605132, 0.01609356, 0.01576487, 0.01621947, 0.01592497,
0.01688981, 0.01718203, 0.01709837, 0.0175492 , 0.01631064,
0.01699268],
[0.02332778, 0.02125759, 0.0212016 , 0.02128622, 0.0215041 ,
0.01582952, 0.01584093, 0.01702626, 0.01709612, 0.01605132,
0.01609356, 0.01576487, 0.01621947, 0.01592497, 0.01688981,
0.01718203, 0.01709837, 0.0175492 , 0.01631064, 0.01699268,
0.01699004],
[0.02125759, 0.0212016 , 0.02128622, 0.0215041 , 0.01582952,
0.01584093, 0.01702626, 0.01709612, 0.01605132, 0.01609356,
0.01576487, 0.01621947, 0.01592497, 0.01688981, 0.01718203,
0.01709837, 0.0175492 , 0.01631064, 0.01699268, 0.01699004,
0.0173284 ],
[0.0212016 , 0.02128622, 0.0215041 , 0.01582952, 0.01584093,
0.01702626, 0.01709612, 0.01605132, 0.01609356, 0.01576487,
0.01621947, 0.01592497, 0.01688981, 0.01718203, 0.01709837,
0.0175492 , 0.01631064, 0.01699268, 0.01699004, 0.0173284 ,
0.01797874],
[0.02128622, 0.0215041 , 0.01582952, 0.01584093, 0.01702626,
0.01709612, 0.01605132, 0.01609356, 0.01576487, 0.01621947,
0.01592497, 0.01688981, 0.01718203, 0.01709837, 0.0175492 ,
0.01631064, 0.01699268, 0.01699004, 0.0173284 , 0.01797874,
0.01627171],
[0.0215041 , 0.01582952, 0.01584093, 0.01702626, 0.01709612,
0.01605132, 0.01609356, 0.01576487, 0.01621947, 0.01592497,
0.01688981, 0.01718203, 0.01709837, 0.0175492 , 0.01631064,
0.01699268, 0.01699004, 0.0173284 , 0.01797874, 0.01627171,
0.01640381],
[0.01582952, 0.01584093, 0.01702626, 0.01709612, 0.01605132,
0.01609356, 0.01576487, 0.01621947, 0.01592497, 0.01688981,
0.01718203, 0.01709837, 0.0175492 , 0.01631064, 0.01699268,
0.01699004, 0.0173284 , 0.01797874, 0.01627171, 0.01640381,
0.0163457 ],
[0.01584093, 0.01702626, 0.01709612, 0.01605132, 0.01609356,
0.01576487, 0.01621947, 0.01592497, 0.01688981, 0.01718203,
0.01709837, 0.0175492 , 0.01631064, 0.01699268, 0.01699004,
0.0173284 , 0.01797874, 0.01627171, 0.01640381, 0.0163457 ,
0.01949109],
[0.01702626, 0.01709612, 0.01605132, 0.01609356, 0.01576487,
0.01621947, 0.01592497, 0.01688981, 0.01718203, 0.01709837,
0.0175492 , 0.01631064, 0.01699268, 0.01699004, 0.0173284 ,
0.01797874, 0.01627171, 0.01640381, 0.0163457 , 0.01949109,
0.02197152],
[0.01709612, 0.01605132, 0.01609356, 0.01576487, 0.01621947,
0.01592497, 0.01688981, 0.01718203, 0.01709837, 0.0175492 ,
0.01631064, 0.01699268, 0.01699004, 0.0173284 , 0.01797874,
0.01627171, 0.01640381, 0.0163457 , 0.01949109, 0.02197152,
0.03071454],
[0.01605132, 0.01609356, 0.01576487, 0.01621947, 0.01592497,
0.01688981, 0.01718203, 0.01709837, 0.0175492 , 0.01631064,
0.01699268, 0.01699004, 0.0173284 , 0.01797874, 0.01627171,
0.01640381, 0.0163457 , 0.01949109, 0.02197152, 0.03071454,
0.03197917],
[0.01609356, 0.01576487, 0.01621947, 0.01592497, 0.01688981,
0.01718203, 0.01709837, 0.0175492 , 0.01631064, 0.01699268,
0.01699004, 0.0173284 , 0.01797874, 0.01627171, 0.01640381,
0.0163457 , 0.01949109, 0.02197152, 0.03071454, 0.03197917,
0.03195915],
[0.01576487, 0.01621947, 0.01592497, 0.01688981, 0.01718203,
0.01709837, 0.0175492 , 0.01631064, 0.01699268, 0.01699004,
0.0173284 , 0.01797874, 0.01627171, 0.01640381, 0.0163457 ,
0.01949109, 0.02197152, 0.03071454, 0.03197917, 0.03195915,
0.03180936]])
y_test_vola14_21
array([[0.01621947],
[0.01592497],
[0.01688981],
[0.01718203],
[0.01709837],
[0.0175492 ],
[0.01631064],
[0.01699268],
[0.01699004],
[0.0173284 ],
[0.01797874],
[0.01627171],
[0.01640381],
[0.0163457 ],
[0.01949109],
[0.02197152],
[0.03071454],
[0.03197917],
[0.03195915],
[0.03180936],
[0.03180612]])
Horizonte de 28 días (\(\tau=28\))
# Build the train/validation/test sliding-window datasets for the 14-day
# volatility series using a 28-day look-back horizon (tau = 28).
X_train_vola14_28, y_train_vola14_28, X_val_vola14_28, y_val_vola14_28, X_test_vola14_28, y_test_vola14_28 = create_time_series_datasets(df_1_st,'Volatilidad_14',tau28)
# Report the dimensions of every generated array
for _label, _arr in (("X_train", X_train_vola14_28), ("y_train", y_train_vola14_28),
                     ("X_val", X_val_vola14_28), ("y_val", y_val_vola14_28),
                     ("X_test", X_test_vola14_28), ("y_test", y_test_vola14_28)):
    print(f"{_label} shape:", _arr.shape)
X_train shape: (4887, 28)
y_train shape: (4887, 1)
X_val shape: (28, 28)
y_val shape: (28, 1)
X_test shape: (28, 28)
y_test shape: (28, 1)
A continuación se puede visualizar el arreglo para cada una de las matrices generadas para los datos de entrenamiento, validación y prueba para un \(\tau = 28\).
X_train_vola14_28
array([[0. , 0. , 0. , ..., 0. , 0. ,
0. ],
[0. , 0. , 0. , ..., 0. , 0. ,
0. ],
[0. , 0. , 0. , ..., 0. , 0. ,
0. ],
...,
[0.02020241, 0.02056699, 0.020185 , ..., 0.02068237, 0.01950113,
0.02027235],
[0.02056699, 0.020185 , 0.02352213, ..., 0.01950113, 0.02027235,
0.01928971],
[0.020185 , 0.02352213, 0.02064785, ..., 0.02027235, 0.01928971,
0.0191655 ]])
y_train_vola14_28
array([[0. ],
[0. ],
[0. ],
...,
[0.01928971],
[0.0191655 ],
[0.01816356]])
X_val_vola14_28
array([[0.02352213, 0.02064785, 0.01954596, 0.02011193, 0.02027472,
0.02097313, 0.0203994 , 0.02736212, 0.02727381, 0.02810602,
0.02791582, 0.02899915, 0.02873944, 0.02949093, 0.02747868,
0.02378801, 0.02559696, 0.0254107 , 0.02481118, 0.02469695,
0.02510062, 0.01924227, 0.02068237, 0.01950113, 0.02027235,
0.01928971, 0.0191655 , 0.01816356],
[0.02064785, 0.01954596, 0.02011193, 0.02027472, 0.02097313,
0.0203994 , 0.02736212, 0.02727381, 0.02810602, 0.02791582,
0.02899915, 0.02873944, 0.02949093, 0.02747868, 0.02378801,
0.02559696, 0.0254107 , 0.02481118, 0.02469695, 0.02510062,
0.01924227, 0.02068237, 0.01950113, 0.02027235, 0.01928971,
0.0191655 , 0.01816356, 0.02018555],
[0.01954596, 0.02011193, 0.02027472, 0.02097313, 0.0203994 ,
0.02736212, 0.02727381, 0.02810602, 0.02791582, 0.02899915,
0.02873944, 0.02949093, 0.02747868, 0.02378801, 0.02559696,
0.0254107 , 0.02481118, 0.02469695, 0.02510062, 0.01924227,
0.02068237, 0.01950113, 0.02027235, 0.01928971, 0.0191655 ,
0.01816356, 0.02018555, 0.0202123 ],
[0.02011193, 0.02027472, 0.02097313, 0.0203994 , 0.02736212,
0.02727381, 0.02810602, 0.02791582, 0.02899915, 0.02873944,
0.02949093, 0.02747868, 0.02378801, 0.02559696, 0.0254107 ,
0.02481118, 0.02469695, 0.02510062, 0.01924227, 0.02068237,
0.01950113, 0.02027235, 0.01928971, 0.0191655 , 0.01816356,
0.02018555, 0.0202123 , 0.02267684],
[0.02027472, 0.02097313, 0.0203994 , 0.02736212, 0.02727381,
0.02810602, 0.02791582, 0.02899915, 0.02873944, 0.02949093,
0.02747868, 0.02378801, 0.02559696, 0.0254107 , 0.02481118,
0.02469695, 0.02510062, 0.01924227, 0.02068237, 0.01950113,
0.02027235, 0.01928971, 0.0191655 , 0.01816356, 0.02018555,
0.0202123 , 0.02267684, 0.02421744],
[0.02097313, 0.0203994 , 0.02736212, 0.02727381, 0.02810602,
0.02791582, 0.02899915, 0.02873944, 0.02949093, 0.02747868,
0.02378801, 0.02559696, 0.0254107 , 0.02481118, 0.02469695,
0.02510062, 0.01924227, 0.02068237, 0.01950113, 0.02027235,
0.01928971, 0.0191655 , 0.01816356, 0.02018555, 0.0202123 ,
0.02267684, 0.02421744, 0.02421361],
[0.0203994 , 0.02736212, 0.02727381, 0.02810602, 0.02791582,
0.02899915, 0.02873944, 0.02949093, 0.02747868, 0.02378801,
0.02559696, 0.0254107 , 0.02481118, 0.02469695, 0.02510062,
0.01924227, 0.02068237, 0.01950113, 0.02027235, 0.01928971,
0.0191655 , 0.01816356, 0.02018555, 0.0202123 , 0.02267684,
0.02421744, 0.02421361, 0.02418457],
[0.02736212, 0.02727381, 0.02810602, 0.02791582, 0.02899915,
0.02873944, 0.02949093, 0.02747868, 0.02378801, 0.02559696,
0.0254107 , 0.02481118, 0.02469695, 0.02510062, 0.01924227,
0.02068237, 0.01950113, 0.02027235, 0.01928971, 0.0191655 ,
0.01816356, 0.02018555, 0.0202123 , 0.02267684, 0.02421744,
0.02421361, 0.02418457, 0.02367551],
[0.02727381, 0.02810602, 0.02791582, 0.02899915, 0.02873944,
0.02949093, 0.02747868, 0.02378801, 0.02559696, 0.0254107 ,
0.02481118, 0.02469695, 0.02510062, 0.01924227, 0.02068237,
0.01950113, 0.02027235, 0.01928971, 0.0191655 , 0.01816356,
0.02018555, 0.0202123 , 0.02267684, 0.02421744, 0.02421361,
0.02418457, 0.02367551, 0.02967658],
[0.02810602, 0.02791582, 0.02899915, 0.02873944, 0.02949093,
0.02747868, 0.02378801, 0.02559696, 0.0254107 , 0.02481118,
0.02469695, 0.02510062, 0.01924227, 0.02068237, 0.01950113,
0.02027235, 0.01928971, 0.0191655 , 0.01816356, 0.02018555,
0.0202123 , 0.02267684, 0.02421744, 0.02421361, 0.02418457,
0.02367551, 0.02967658, 0.02920221],
[0.02791582, 0.02899915, 0.02873944, 0.02949093, 0.02747868,
0.02378801, 0.02559696, 0.0254107 , 0.02481118, 0.02469695,
0.02510062, 0.01924227, 0.02068237, 0.01950113, 0.02027235,
0.01928971, 0.0191655 , 0.01816356, 0.02018555, 0.0202123 ,
0.02267684, 0.02421744, 0.02421361, 0.02418457, 0.02367551,
0.02967658, 0.02920221, 0.02889174],
[0.02899915, 0.02873944, 0.02949093, 0.02747868, 0.02378801,
0.02559696, 0.0254107 , 0.02481118, 0.02469695, 0.02510062,
0.01924227, 0.02068237, 0.01950113, 0.02027235, 0.01928971,
0.0191655 , 0.01816356, 0.02018555, 0.0202123 , 0.02267684,
0.02421744, 0.02421361, 0.02418457, 0.02367551, 0.02967658,
0.02920221, 0.02889174, 0.02818356],
[0.02873944, 0.02949093, 0.02747868, 0.02378801, 0.02559696,
0.0254107 , 0.02481118, 0.02469695, 0.02510062, 0.01924227,
0.02068237, 0.01950113, 0.02027235, 0.01928971, 0.0191655 ,
0.01816356, 0.02018555, 0.0202123 , 0.02267684, 0.02421744,
0.02421361, 0.02418457, 0.02367551, 0.02967658, 0.02920221,
0.02889174, 0.02818356, 0.0355796 ],
[0.02949093, 0.02747868, 0.02378801, 0.02559696, 0.0254107 ,
0.02481118, 0.02469695, 0.02510062, 0.01924227, 0.02068237,
0.01950113, 0.02027235, 0.01928971, 0.0191655 , 0.01816356,
0.02018555, 0.0202123 , 0.02267684, 0.02421744, 0.02421361,
0.02418457, 0.02367551, 0.02967658, 0.02920221, 0.02889174,
0.02818356, 0.0355796 , 0.03558185],
[0.02747868, 0.02378801, 0.02559696, 0.0254107 , 0.02481118,
0.02469695, 0.02510062, 0.01924227, 0.02068237, 0.01950113,
0.02027235, 0.01928971, 0.0191655 , 0.01816356, 0.02018555,
0.0202123 , 0.02267684, 0.02421744, 0.02421361, 0.02418457,
0.02367551, 0.02967658, 0.02920221, 0.02889174, 0.02818356,
0.0355796 , 0.03558185, 0.03632934],
[0.02378801, 0.02559696, 0.0254107 , 0.02481118, 0.02469695,
0.02510062, 0.01924227, 0.02068237, 0.01950113, 0.02027235,
0.01928971, 0.0191655 , 0.01816356, 0.02018555, 0.0202123 ,
0.02267684, 0.02421744, 0.02421361, 0.02418457, 0.02367551,
0.02967658, 0.02920221, 0.02889174, 0.02818356, 0.0355796 ,
0.03558185, 0.03632934, 0.03439932],
[0.02559696, 0.0254107 , 0.02481118, 0.02469695, 0.02510062,
0.01924227, 0.02068237, 0.01950113, 0.02027235, 0.01928971,
0.0191655 , 0.01816356, 0.02018555, 0.0202123 , 0.02267684,
0.02421744, 0.02421361, 0.02418457, 0.02367551, 0.02967658,
0.02920221, 0.02889174, 0.02818356, 0.0355796 , 0.03558185,
0.03632934, 0.03439932, 0.03430583],
[0.0254107 , 0.02481118, 0.02469695, 0.02510062, 0.01924227,
0.02068237, 0.01950113, 0.02027235, 0.01928971, 0.0191655 ,
0.01816356, 0.02018555, 0.0202123 , 0.02267684, 0.02421744,
0.02421361, 0.02418457, 0.02367551, 0.02967658, 0.02920221,
0.02889174, 0.02818356, 0.0355796 , 0.03558185, 0.03632934,
0.03439932, 0.03430583, 0.03192901],
[0.02481118, 0.02469695, 0.02510062, 0.01924227, 0.02068237,
0.01950113, 0.02027235, 0.01928971, 0.0191655 , 0.01816356,
0.02018555, 0.0202123 , 0.02267684, 0.02421744, 0.02421361,
0.02418457, 0.02367551, 0.02967658, 0.02920221, 0.02889174,
0.02818356, 0.0355796 , 0.03558185, 0.03632934, 0.03439932,
0.03430583, 0.03192901, 0.03190819],
[0.02469695, 0.02510062, 0.01924227, 0.02068237, 0.01950113,
0.02027235, 0.01928971, 0.0191655 , 0.01816356, 0.02018555,
0.0202123 , 0.02267684, 0.02421744, 0.02421361, 0.02418457,
0.02367551, 0.02967658, 0.02920221, 0.02889174, 0.02818356,
0.0355796 , 0.03558185, 0.03632934, 0.03439932, 0.03430583,
0.03192901, 0.03190819, 0.03207988],
[0.02510062, 0.01924227, 0.02068237, 0.01950113, 0.02027235,
0.01928971, 0.0191655 , 0.01816356, 0.02018555, 0.0202123 ,
0.02267684, 0.02421744, 0.02421361, 0.02418457, 0.02367551,
0.02967658, 0.02920221, 0.02889174, 0.02818356, 0.0355796 ,
0.03558185, 0.03632934, 0.03439932, 0.03430583, 0.03192901,
0.03190819, 0.03207988, 0.03210488],
[0.01924227, 0.02068237, 0.01950113, 0.02027235, 0.01928971,
0.0191655 , 0.01816356, 0.02018555, 0.0202123 , 0.02267684,
0.02421744, 0.02421361, 0.02418457, 0.02367551, 0.02967658,
0.02920221, 0.02889174, 0.02818356, 0.0355796 , 0.03558185,
0.03632934, 0.03439932, 0.03430583, 0.03192901, 0.03190819,
0.03207988, 0.03210488, 0.03209828],
[0.02068237, 0.01950113, 0.02027235, 0.01928971, 0.0191655 ,
0.01816356, 0.02018555, 0.0202123 , 0.02267684, 0.02421744,
0.02421361, 0.02418457, 0.02367551, 0.02967658, 0.02920221,
0.02889174, 0.02818356, 0.0355796 , 0.03558185, 0.03632934,
0.03439932, 0.03430583, 0.03192901, 0.03190819, 0.03207988,
0.03210488, 0.03209828, 0.02659769],
[0.01950113, 0.02027235, 0.01928971, 0.0191655 , 0.01816356,
0.02018555, 0.0202123 , 0.02267684, 0.02421744, 0.02421361,
0.02418457, 0.02367551, 0.02967658, 0.02920221, 0.02889174,
0.02818356, 0.0355796 , 0.03558185, 0.03632934, 0.03439932,
0.03430583, 0.03192901, 0.03190819, 0.03207988, 0.03210488,
0.03209828, 0.02659769, 0.02706552],
[0.02027235, 0.01928971, 0.0191655 , 0.01816356, 0.02018555,
0.0202123 , 0.02267684, 0.02421744, 0.02421361, 0.02418457,
0.02367551, 0.02967658, 0.02920221, 0.02889174, 0.02818356,
0.0355796 , 0.03558185, 0.03632934, 0.03439932, 0.03430583,
0.03192901, 0.03190819, 0.03207988, 0.03210488, 0.03209828,
0.02659769, 0.02706552, 0.02676048],
[0.01928971, 0.0191655 , 0.01816356, 0.02018555, 0.0202123 ,
0.02267684, 0.02421744, 0.02421361, 0.02418457, 0.02367551,
0.02967658, 0.02920221, 0.02889174, 0.02818356, 0.0355796 ,
0.03558185, 0.03632934, 0.03439932, 0.03430583, 0.03192901,
0.03190819, 0.03207988, 0.03210488, 0.03209828, 0.02659769,
0.02706552, 0.02676048, 0.02679602],
[0.0191655 , 0.01816356, 0.02018555, 0.0202123 , 0.02267684,
0.02421744, 0.02421361, 0.02418457, 0.02367551, 0.02967658,
0.02920221, 0.02889174, 0.02818356, 0.0355796 , 0.03558185,
0.03632934, 0.03439932, 0.03430583, 0.03192901, 0.03190819,
0.03207988, 0.03210488, 0.03209828, 0.02659769, 0.02706552,
0.02676048, 0.02679602, 0.02358944],
[0.01816356, 0.02018555, 0.0202123 , 0.02267684, 0.02421744,
0.02421361, 0.02418457, 0.02367551, 0.02967658, 0.02920221,
0.02889174, 0.02818356, 0.0355796 , 0.03558185, 0.03632934,
0.03439932, 0.03430583, 0.03192901, 0.03190819, 0.03207988,
0.03210488, 0.03209828, 0.02659769, 0.02706552, 0.02676048,
0.02679602, 0.02358944, 0.02370629]])
y_val_vola14_28
array([[0.02018555],
[0.0202123 ],
[0.02267684],
[0.02421744],
[0.02421361],
[0.02418457],
[0.02367551],
[0.02967658],
[0.02920221],
[0.02889174],
[0.02818356],
[0.0355796 ],
[0.03558185],
[0.03632934],
[0.03439932],
[0.03430583],
[0.03192901],
[0.03190819],
[0.03207988],
[0.03210488],
[0.03209828],
[0.02659769],
[0.02706552],
[0.02676048],
[0.02679602],
[0.02358944],
[0.02370629],
[0.02261878]])
X_test_vola14_28
array([[0.02018555, 0.0202123 , 0.02267684, 0.02421744, 0.02421361,
0.02418457, 0.02367551, 0.02967658, 0.02920221, 0.02889174,
0.02818356, 0.0355796 , 0.03558185, 0.03632934, 0.03439932,
0.03430583, 0.03192901, 0.03190819, 0.03207988, 0.03210488,
0.03209828, 0.02659769, 0.02706552, 0.02676048, 0.02679602,
0.02358944, 0.02370629, 0.02261878],
[0.0202123 , 0.02267684, 0.02421744, 0.02421361, 0.02418457,
0.02367551, 0.02967658, 0.02920221, 0.02889174, 0.02818356,
0.0355796 , 0.03558185, 0.03632934, 0.03439932, 0.03430583,
0.03192901, 0.03190819, 0.03207988, 0.03210488, 0.03209828,
0.02659769, 0.02706552, 0.02676048, 0.02679602, 0.02358944,
0.02370629, 0.02261878, 0.02353375],
[0.02267684, 0.02421744, 0.02421361, 0.02418457, 0.02367551,
0.02967658, 0.02920221, 0.02889174, 0.02818356, 0.0355796 ,
0.03558185, 0.03632934, 0.03439932, 0.03430583, 0.03192901,
0.03190819, 0.03207988, 0.03210488, 0.03209828, 0.02659769,
0.02706552, 0.02676048, 0.02679602, 0.02358944, 0.02370629,
0.02261878, 0.02353375, 0.02333393],
[0.02421744, 0.02421361, 0.02418457, 0.02367551, 0.02967658,
0.02920221, 0.02889174, 0.02818356, 0.0355796 , 0.03558185,
0.03632934, 0.03439932, 0.03430583, 0.03192901, 0.03190819,
0.03207988, 0.03210488, 0.03209828, 0.02659769, 0.02706552,
0.02676048, 0.02679602, 0.02358944, 0.02370629, 0.02261878,
0.02353375, 0.02333393, 0.02332778],
[0.02421361, 0.02418457, 0.02367551, 0.02967658, 0.02920221,
0.02889174, 0.02818356, 0.0355796 , 0.03558185, 0.03632934,
0.03439932, 0.03430583, 0.03192901, 0.03190819, 0.03207988,
0.03210488, 0.03209828, 0.02659769, 0.02706552, 0.02676048,
0.02679602, 0.02358944, 0.02370629, 0.02261878, 0.02353375,
0.02333393, 0.02332778, 0.02125759],
[0.02418457, 0.02367551, 0.02967658, 0.02920221, 0.02889174,
0.02818356, 0.0355796 , 0.03558185, 0.03632934, 0.03439932,
0.03430583, 0.03192901, 0.03190819, 0.03207988, 0.03210488,
0.03209828, 0.02659769, 0.02706552, 0.02676048, 0.02679602,
0.02358944, 0.02370629, 0.02261878, 0.02353375, 0.02333393,
0.02332778, 0.02125759, 0.0212016 ],
[0.02367551, 0.02967658, 0.02920221, 0.02889174, 0.02818356,
0.0355796 , 0.03558185, 0.03632934, 0.03439932, 0.03430583,
0.03192901, 0.03190819, 0.03207988, 0.03210488, 0.03209828,
0.02659769, 0.02706552, 0.02676048, 0.02679602, 0.02358944,
0.02370629, 0.02261878, 0.02353375, 0.02333393, 0.02332778,
0.02125759, 0.0212016 , 0.02128622],
[0.02967658, 0.02920221, 0.02889174, 0.02818356, 0.0355796 ,
0.03558185, 0.03632934, 0.03439932, 0.03430583, 0.03192901,
0.03190819, 0.03207988, 0.03210488, 0.03209828, 0.02659769,
0.02706552, 0.02676048, 0.02679602, 0.02358944, 0.02370629,
0.02261878, 0.02353375, 0.02333393, 0.02332778, 0.02125759,
0.0212016 , 0.02128622, 0.0215041 ],
[0.02920221, 0.02889174, 0.02818356, 0.0355796 , 0.03558185,
0.03632934, 0.03439932, 0.03430583, 0.03192901, 0.03190819,
0.03207988, 0.03210488, 0.03209828, 0.02659769, 0.02706552,
0.02676048, 0.02679602, 0.02358944, 0.02370629, 0.02261878,
0.02353375, 0.02333393, 0.02332778, 0.02125759, 0.0212016 ,
0.02128622, 0.0215041 , 0.01582952],
[0.02889174, 0.02818356, 0.0355796 , 0.03558185, 0.03632934,
0.03439932, 0.03430583, 0.03192901, 0.03190819, 0.03207988,
0.03210488, 0.03209828, 0.02659769, 0.02706552, 0.02676048,
0.02679602, 0.02358944, 0.02370629, 0.02261878, 0.02353375,
0.02333393, 0.02332778, 0.02125759, 0.0212016 , 0.02128622,
0.0215041 , 0.01582952, 0.01584093],
[0.02818356, 0.0355796 , 0.03558185, 0.03632934, 0.03439932,
0.03430583, 0.03192901, 0.03190819, 0.03207988, 0.03210488,
0.03209828, 0.02659769, 0.02706552, 0.02676048, 0.02679602,
0.02358944, 0.02370629, 0.02261878, 0.02353375, 0.02333393,
0.02332778, 0.02125759, 0.0212016 , 0.02128622, 0.0215041 ,
0.01582952, 0.01584093, 0.01702626],
[0.0355796 , 0.03558185, 0.03632934, 0.03439932, 0.03430583,
0.03192901, 0.03190819, 0.03207988, 0.03210488, 0.03209828,
0.02659769, 0.02706552, 0.02676048, 0.02679602, 0.02358944,
0.02370629, 0.02261878, 0.02353375, 0.02333393, 0.02332778,
0.02125759, 0.0212016 , 0.02128622, 0.0215041 , 0.01582952,
0.01584093, 0.01702626, 0.01709612],
[0.03558185, 0.03632934, 0.03439932, 0.03430583, 0.03192901,
0.03190819, 0.03207988, 0.03210488, 0.03209828, 0.02659769,
0.02706552, 0.02676048, 0.02679602, 0.02358944, 0.02370629,
0.02261878, 0.02353375, 0.02333393, 0.02332778, 0.02125759,
0.0212016 , 0.02128622, 0.0215041 , 0.01582952, 0.01584093,
0.01702626, 0.01709612, 0.01605132],
[0.03632934, 0.03439932, 0.03430583, 0.03192901, 0.03190819,
0.03207988, 0.03210488, 0.03209828, 0.02659769, 0.02706552,
0.02676048, 0.02679602, 0.02358944, 0.02370629, 0.02261878,
0.02353375, 0.02333393, 0.02332778, 0.02125759, 0.0212016 ,
0.02128622, 0.0215041 , 0.01582952, 0.01584093, 0.01702626,
0.01709612, 0.01605132, 0.01609356],
[0.03439932, 0.03430583, 0.03192901, 0.03190819, 0.03207988,
0.03210488, 0.03209828, 0.02659769, 0.02706552, 0.02676048,
0.02679602, 0.02358944, 0.02370629, 0.02261878, 0.02353375,
0.02333393, 0.02332778, 0.02125759, 0.0212016 , 0.02128622,
0.0215041 , 0.01582952, 0.01584093, 0.01702626, 0.01709612,
0.01605132, 0.01609356, 0.01576487],
[0.03430583, 0.03192901, 0.03190819, 0.03207988, 0.03210488,
0.03209828, 0.02659769, 0.02706552, 0.02676048, 0.02679602,
0.02358944, 0.02370629, 0.02261878, 0.02353375, 0.02333393,
0.02332778, 0.02125759, 0.0212016 , 0.02128622, 0.0215041 ,
0.01582952, 0.01584093, 0.01702626, 0.01709612, 0.01605132,
0.01609356, 0.01576487, 0.01621947],
[0.03192901, 0.03190819, 0.03207988, 0.03210488, 0.03209828,
0.02659769, 0.02706552, 0.02676048, 0.02679602, 0.02358944,
0.02370629, 0.02261878, 0.02353375, 0.02333393, 0.02332778,
0.02125759, 0.0212016 , 0.02128622, 0.0215041 , 0.01582952,
0.01584093, 0.01702626, 0.01709612, 0.01605132, 0.01609356,
0.01576487, 0.01621947, 0.01592497],
[0.03190819, 0.03207988, 0.03210488, 0.03209828, 0.02659769,
0.02706552, 0.02676048, 0.02679602, 0.02358944, 0.02370629,
0.02261878, 0.02353375, 0.02333393, 0.02332778, 0.02125759,
0.0212016 , 0.02128622, 0.0215041 , 0.01582952, 0.01584093,
0.01702626, 0.01709612, 0.01605132, 0.01609356, 0.01576487,
0.01621947, 0.01592497, 0.01688981],
[0.03207988, 0.03210488, 0.03209828, 0.02659769, 0.02706552,
0.02676048, 0.02679602, 0.02358944, 0.02370629, 0.02261878,
0.02353375, 0.02333393, 0.02332778, 0.02125759, 0.0212016 ,
0.02128622, 0.0215041 , 0.01582952, 0.01584093, 0.01702626,
0.01709612, 0.01605132, 0.01609356, 0.01576487, 0.01621947,
0.01592497, 0.01688981, 0.01718203],
[0.03210488, 0.03209828, 0.02659769, 0.02706552, 0.02676048,
0.02679602, 0.02358944, 0.02370629, 0.02261878, 0.02353375,
0.02333393, 0.02332778, 0.02125759, 0.0212016 , 0.02128622,
0.0215041 , 0.01582952, 0.01584093, 0.01702626, 0.01709612,
0.01605132, 0.01609356, 0.01576487, 0.01621947, 0.01592497,
0.01688981, 0.01718203, 0.01709837],
[0.03209828, 0.02659769, 0.02706552, 0.02676048, 0.02679602,
0.02358944, 0.02370629, 0.02261878, 0.02353375, 0.02333393,
0.02332778, 0.02125759, 0.0212016 , 0.02128622, 0.0215041 ,
0.01582952, 0.01584093, 0.01702626, 0.01709612, 0.01605132,
0.01609356, 0.01576487, 0.01621947, 0.01592497, 0.01688981,
0.01718203, 0.01709837, 0.0175492 ],
[0.02659769, 0.02706552, 0.02676048, 0.02679602, 0.02358944,
0.02370629, 0.02261878, 0.02353375, 0.02333393, 0.02332778,
0.02125759, 0.0212016 , 0.02128622, 0.0215041 , 0.01582952,
0.01584093, 0.01702626, 0.01709612, 0.01605132, 0.01609356,
0.01576487, 0.01621947, 0.01592497, 0.01688981, 0.01718203,
0.01709837, 0.0175492 , 0.01631064],
[0.02706552, 0.02676048, 0.02679602, 0.02358944, 0.02370629,
0.02261878, 0.02353375, 0.02333393, 0.02332778, 0.02125759,
0.0212016 , 0.02128622, 0.0215041 , 0.01582952, 0.01584093,
0.01702626, 0.01709612, 0.01605132, 0.01609356, 0.01576487,
0.01621947, 0.01592497, 0.01688981, 0.01718203, 0.01709837,
0.0175492 , 0.01631064, 0.01699268],
[0.02676048, 0.02679602, 0.02358944, 0.02370629, 0.02261878,
0.02353375, 0.02333393, 0.02332778, 0.02125759, 0.0212016 ,
0.02128622, 0.0215041 , 0.01582952, 0.01584093, 0.01702626,
0.01709612, 0.01605132, 0.01609356, 0.01576487, 0.01621947,
0.01592497, 0.01688981, 0.01718203, 0.01709837, 0.0175492 ,
0.01631064, 0.01699268, 0.01699004],
[0.02679602, 0.02358944, 0.02370629, 0.02261878, 0.02353375,
0.02333393, 0.02332778, 0.02125759, 0.0212016 , 0.02128622,
0.0215041 , 0.01582952, 0.01584093, 0.01702626, 0.01709612,
0.01605132, 0.01609356, 0.01576487, 0.01621947, 0.01592497,
0.01688981, 0.01718203, 0.01709837, 0.0175492 , 0.01631064,
0.01699268, 0.01699004, 0.0173284 ],
[0.02358944, 0.02370629, 0.02261878, 0.02353375, 0.02333393,
0.02332778, 0.02125759, 0.0212016 , 0.02128622, 0.0215041 ,
0.01582952, 0.01584093, 0.01702626, 0.01709612, 0.01605132,
0.01609356, 0.01576487, 0.01621947, 0.01592497, 0.01688981,
0.01718203, 0.01709837, 0.0175492 , 0.01631064, 0.01699268,
0.01699004, 0.0173284 , 0.01797874],
[0.02370629, 0.02261878, 0.02353375, 0.02333393, 0.02332778,
0.02125759, 0.0212016 , 0.02128622, 0.0215041 , 0.01582952,
0.01584093, 0.01702626, 0.01709612, 0.01605132, 0.01609356,
0.01576487, 0.01621947, 0.01592497, 0.01688981, 0.01718203,
0.01709837, 0.0175492 , 0.01631064, 0.01699268, 0.01699004,
0.0173284 , 0.01797874, 0.01627171],
[0.02261878, 0.02353375, 0.02333393, 0.02332778, 0.02125759,
0.0212016 , 0.02128622, 0.0215041 , 0.01582952, 0.01584093,
0.01702626, 0.01709612, 0.01605132, 0.01609356, 0.01576487,
0.01621947, 0.01592497, 0.01688981, 0.01718203, 0.01709837,
0.0175492 , 0.01631064, 0.01699268, 0.01699004, 0.0173284 ,
0.01797874, 0.01627171, 0.01640381]])
y_test_vola14_28
array([[0.02353375],
[0.02333393],
[0.02332778],
[0.02125759],
[0.0212016 ],
[0.02128622],
[0.0215041 ],
[0.01582952],
[0.01584093],
[0.01702626],
[0.01709612],
[0.01605132],
[0.01609356],
[0.01576487],
[0.01621947],
[0.01592497],
[0.01688981],
[0.01718203],
[0.01709837],
[0.0175492 ],
[0.01631064],
[0.01699268],
[0.01699004],
[0.0173284 ],
[0.01797874],
[0.01627171],
[0.01640381],
[0.0163457 ]])
\(\omega = 21\)
Horizonte de 7 días (\(\tau=7\))
# Build the train/validation/test sliding-window datasets for the 21-day
# volatility series using a 7-day look-back horizon (tau = 7).
X_train_vola21_7, y_train_vola21_7, X_val_vola21_7, y_val_vola21_7, X_test_vola21_7, y_test_vola21_7 = create_time_series_datasets(df_1_st,'Volatilidad_21',tau7)
# Report the dimensions of every generated array
for _label, _arr in (("X_train", X_train_vola21_7), ("y_train", y_train_vola21_7),
                     ("X_val", X_val_vola21_7), ("y_val", y_val_vola21_7),
                     ("X_test", X_test_vola21_7), ("y_test", y_test_vola21_7)):
    print(f"{_label} shape:", _arr.shape)
X_train shape: (4971, 7)
y_train shape: (4971, 1)
X_val shape: (7, 7)
y_val shape: (7, 1)
X_test shape: (7, 7)
y_test shape: (7, 1)
A continuación se puede visualizar el arreglo para cada una de las matrices generadas para los datos de entrenamiento, validación y prueba para un \(\tau = 7\).
X_train_vola21_7
array([[0. , 0. , 0. , ..., 0. , 0. ,
0. ],
[0. , 0. , 0. , ..., 0. , 0. ,
0. ],
[0. , 0. , 0. , ..., 0. , 0. ,
0. ],
...,
[0.01690949, 0.01676634, 0.01627876, ..., 0.020301 , 0.02682442,
0.02801343],
[0.01676634, 0.01627876, 0.01881878, ..., 0.02682442, 0.02801343,
0.02742416],
[0.01627876, 0.01881878, 0.020301 , ..., 0.02801343, 0.02742416,
0.02777887]])
y_train_vola21_7
array([[0. ],
[0. ],
[0. ],
...,
[0.02742416],
[0.02777887],
[0.02780064]])
X_val_vola21_7
array([[0.01881878, 0.020301 , 0.02682442, 0.02801343, 0.02742416,
0.02777887, 0.02780064],
[0.020301 , 0.02682442, 0.02801343, 0.02742416, 0.02777887,
0.02780064, 0.03125689],
[0.02682442, 0.02801343, 0.02742416, 0.02777887, 0.02780064,
0.03125689, 0.03572809],
[0.02801343, 0.02742416, 0.02777887, 0.02780064, 0.03125689,
0.03572809, 0.03551754],
[0.02742416, 0.02777887, 0.02780064, 0.03125689, 0.03572809,
0.03551754, 0.03544473],
[0.02777887, 0.02780064, 0.03125689, 0.03572809, 0.03551754,
0.03544473, 0.03542358],
[0.02780064, 0.03125689, 0.03572809, 0.03551754, 0.03544473,
0.03542358, 0.03512944]])
y_val_vola21_7
array([[0.03125689],
[0.03572809],
[0.03551754],
[0.03544473],
[0.03542358],
[0.03512944],
[0.03513217]])
X_test_vola21_7
array([[0.03125689, 0.03572809, 0.03551754, 0.03544473, 0.03542358,
0.03512944, 0.03513217],
[0.03572809, 0.03551754, 0.03544473, 0.03542358, 0.03512944,
0.03513217, 0.03544686],
[0.03551754, 0.03544473, 0.03542358, 0.03512944, 0.03513217,
0.03544686, 0.03584484],
[0.03544473, 0.03542358, 0.03512944, 0.03513217, 0.03544686,
0.03584484, 0.03546329],
[0.03542358, 0.03512944, 0.03513217, 0.03544686, 0.03584484,
0.03546329, 0.03605239],
[0.03512944, 0.03513217, 0.03544686, 0.03584484, 0.03546329,
0.03605239, 0.03680841],
[0.03513217, 0.03544686, 0.03584484, 0.03546329, 0.03605239,
0.03680841, 0.04031864]])
y_test_vola21_7
array([[0.03544686],
[0.03584484],
[0.03546329],
[0.03605239],
[0.03680841],
[0.04031864],
[0.04096493]])
Horizonte de 14 días (\(\tau=14\))
# Build the train/validation/test sliding-window datasets for the 21-day
# volatility series using a 14-day look-back horizon (tau = 14).
X_train_vola21_14, y_train_vola21_14, X_val_vola21_14, y_val_vola21_14, X_test_vola21_14, y_test_vola21_14 = create_time_series_datasets(df_1_st,'Volatilidad_21',tau14)
# Report the dimensions of every generated array
for _label, _arr in (("X_train", X_train_vola21_14), ("y_train", y_train_vola21_14),
                     ("X_val", X_val_vola21_14), ("y_val", y_val_vola21_14),
                     ("X_test", X_test_vola21_14), ("y_test", y_test_vola21_14)):
    print(f"{_label} shape:", _arr.shape)
X_train shape: (4943, 14)
y_train shape: (4943, 1)
X_val shape: (14, 14)
y_val shape: (14, 1)
X_test shape: (14, 14)
y_test shape: (14, 1)
A continuación se puede visualizar el arreglo para cada una de las matrices generadas para los datos de entrenamiento, validación y prueba para un \(\tau = 14\).
X_train_vola21_14
array([[0. , 0. , 0. , ..., 0. , 0. ,
0. ],
[0. , 0. , 0. , ..., 0. , 0. ,
0. ],
[0. , 0. , 0. , ..., 0. , 0. ,
0. ],
...,
[0.03014141, 0.0302081 , 0.0302079 , ..., 0.01936915, 0.02024194,
0.01884434],
[0.0302081 , 0.0302079 , 0.02663401, ..., 0.02024194, 0.01884434,
0.0204051 ],
[0.0302079 , 0.02663401, 0.02645964, ..., 0.01884434, 0.0204051 ,
0.02043112]])
y_train_vola21_14
array([[0. ],
[0. ],
[0. ],
...,
[0.0204051 ],
[0.02043112],
[0.02033406]])
X_val_vola21_14
array([[0.02663401, 0.02645964, 0.0262877 , 0.02650421, 0.02063035,
0.02065898, 0.01992985, 0.01954443, 0.01936915, 0.02024194,
0.01884434, 0.0204051 , 0.02043112, 0.02033406],
[0.02645964, 0.0262877 , 0.02650421, 0.02063035, 0.02065898,
0.01992985, 0.01954443, 0.01936915, 0.02024194, 0.01884434,
0.0204051 , 0.02043112, 0.02033406, 0.01663176],
[0.0262877 , 0.02650421, 0.02063035, 0.02065898, 0.01992985,
0.01954443, 0.01936915, 0.02024194, 0.01884434, 0.0204051 ,
0.02043112, 0.02033406, 0.01663176, 0.01697907],
[0.02650421, 0.02063035, 0.02065898, 0.01992985, 0.01954443,
0.01936915, 0.02024194, 0.01884434, 0.0204051 , 0.02043112,
0.02033406, 0.01663176, 0.01697907, 0.01819758],
[0.02063035, 0.02065898, 0.01992985, 0.01954443, 0.01936915,
0.02024194, 0.01884434, 0.0204051 , 0.02043112, 0.02033406,
0.01663176, 0.01697907, 0.01819758, 0.01797544],
[0.02065898, 0.01992985, 0.01954443, 0.01936915, 0.02024194,
0.01884434, 0.0204051 , 0.02043112, 0.02033406, 0.01663176,
0.01697907, 0.01819758, 0.01797544, 0.01623343],
[0.01992985, 0.01954443, 0.01936915, 0.02024194, 0.01884434,
0.0204051 , 0.02043112, 0.02033406, 0.01663176, 0.01697907,
0.01819758, 0.01797544, 0.01623343, 0.01680528],
[0.01954443, 0.01936915, 0.02024194, 0.01884434, 0.0204051 ,
0.02043112, 0.02033406, 0.01663176, 0.01697907, 0.01819758,
0.01797544, 0.01623343, 0.01680528, 0.01658037],
[0.01936915, 0.02024194, 0.01884434, 0.0204051 , 0.02043112,
0.02033406, 0.01663176, 0.01697907, 0.01819758, 0.01797544,
0.01623343, 0.01680528, 0.01658037, 0.01631993],
[0.02024194, 0.01884434, 0.0204051 , 0.02043112, 0.02033406,
0.01663176, 0.01697907, 0.01819758, 0.01797544, 0.01623343,
0.01680528, 0.01658037, 0.01631993, 0.0158569 ],
[0.01884434, 0.0204051 , 0.02043112, 0.02033406, 0.01663176,
0.01697907, 0.01819758, 0.01797544, 0.01623343, 0.01680528,
0.01658037, 0.01631993, 0.0158569 , 0.01581449],
[0.0204051 , 0.02043112, 0.02033406, 0.01663176, 0.01697907,
0.01819758, 0.01797544, 0.01623343, 0.01680528, 0.01658037,
0.01631993, 0.0158569 , 0.01581449, 0.01638668],
[0.02043112, 0.02033406, 0.01663176, 0.01697907, 0.01819758,
0.01797544, 0.01623343, 0.01680528, 0.01658037, 0.01631993,
0.0158569 , 0.01581449, 0.01638668, 0.01690949],
[0.02033406, 0.01663176, 0.01697907, 0.01819758, 0.01797544,
0.01623343, 0.01680528, 0.01658037, 0.01631993, 0.0158569 ,
0.01581449, 0.01638668, 0.01690949, 0.01676634]])
y_val_vola21_14
array([[0.01663176],
[0.01697907],
[0.01819758],
[0.01797544],
[0.01623343],
[0.01680528],
[0.01658037],
[0.01631993],
[0.0158569 ],
[0.01581449],
[0.01638668],
[0.01690949],
[0.01676634],
[0.01627876]])
X_test_vola21_14
array([[0.01663176, 0.01697907, 0.01819758, 0.01797544, 0.01623343,
0.01680528, 0.01658037, 0.01631993, 0.0158569 , 0.01581449,
0.01638668, 0.01690949, 0.01676634, 0.01627876],
[0.01697907, 0.01819758, 0.01797544, 0.01623343, 0.01680528,
0.01658037, 0.01631993, 0.0158569 , 0.01581449, 0.01638668,
0.01690949, 0.01676634, 0.01627876, 0.01881878],
[0.01819758, 0.01797544, 0.01623343, 0.01680528, 0.01658037,
0.01631993, 0.0158569 , 0.01581449, 0.01638668, 0.01690949,
0.01676634, 0.01627876, 0.01881878, 0.020301 ],
[0.01797544, 0.01623343, 0.01680528, 0.01658037, 0.01631993,
0.0158569 , 0.01581449, 0.01638668, 0.01690949, 0.01676634,
0.01627876, 0.01881878, 0.020301 , 0.02682442],
[0.01623343, 0.01680528, 0.01658037, 0.01631993, 0.0158569 ,
0.01581449, 0.01638668, 0.01690949, 0.01676634, 0.01627876,
0.01881878, 0.020301 , 0.02682442, 0.02801343],
[0.01680528, 0.01658037, 0.01631993, 0.0158569 , 0.01581449,
0.01638668, 0.01690949, 0.01676634, 0.01627876, 0.01881878,
0.020301 , 0.02682442, 0.02801343, 0.02742416],
[0.01658037, 0.01631993, 0.0158569 , 0.01581449, 0.01638668,
0.01690949, 0.01676634, 0.01627876, 0.01881878, 0.020301 ,
0.02682442, 0.02801343, 0.02742416, 0.02777887],
[0.01631993, 0.0158569 , 0.01581449, 0.01638668, 0.01690949,
0.01676634, 0.01627876, 0.01881878, 0.020301 , 0.02682442,
0.02801343, 0.02742416, 0.02777887, 0.02780064],
[0.0158569 , 0.01581449, 0.01638668, 0.01690949, 0.01676634,
0.01627876, 0.01881878, 0.020301 , 0.02682442, 0.02801343,
0.02742416, 0.02777887, 0.02780064, 0.03125689],
[0.01581449, 0.01638668, 0.01690949, 0.01676634, 0.01627876,
0.01881878, 0.020301 , 0.02682442, 0.02801343, 0.02742416,
0.02777887, 0.02780064, 0.03125689, 0.03572809],
[0.01638668, 0.01690949, 0.01676634, 0.01627876, 0.01881878,
0.020301 , 0.02682442, 0.02801343, 0.02742416, 0.02777887,
0.02780064, 0.03125689, 0.03572809, 0.03551754],
[0.01690949, 0.01676634, 0.01627876, 0.01881878, 0.020301 ,
0.02682442, 0.02801343, 0.02742416, 0.02777887, 0.02780064,
0.03125689, 0.03572809, 0.03551754, 0.03544473],
[0.01676634, 0.01627876, 0.01881878, 0.020301 , 0.02682442,
0.02801343, 0.02742416, 0.02777887, 0.02780064, 0.03125689,
0.03572809, 0.03551754, 0.03544473, 0.03542358],
[0.01627876, 0.01881878, 0.020301 , 0.02682442, 0.02801343,
0.02742416, 0.02777887, 0.02780064, 0.03125689, 0.03572809,
0.03551754, 0.03544473, 0.03542358, 0.03512944]])
y_test_vola21_14
array([[0.01881878],
[0.020301 ],
[0.02682442],
[0.02801343],
[0.02742416],
[0.02777887],
[0.02780064],
[0.03125689],
[0.03572809],
[0.03551754],
[0.03544473],
[0.03542358],
[0.03512944],
[0.03513217]])
Horizonte de 21 días (\(\tau=21\))
# Build the train/validation/test sliding-window datasets for the 21-day
# volatility series using a 21-day look-back horizon (tau = 21).
X_train_vola21_21, y_train_vola21_21, X_val_vola21_21, y_val_vola21_21, X_test_vola21_21, y_test_vola21_21 = create_time_series_datasets(df_1_st,'Volatilidad_21',tau21)
# Report the dimensions of every generated array
for _label, _arr in (("X_train", X_train_vola21_21), ("y_train", y_train_vola21_21),
                     ("X_val", X_val_vola21_21), ("y_val", y_val_vola21_21),
                     ("X_test", X_test_vola21_21), ("y_test", y_test_vola21_21)):
    print(f"{_label} shape:", _arr.shape)
X_train shape: (4915, 21)
y_train shape: (4915, 1)
X_val shape: (21, 21)
y_val shape: (21, 1)
X_test shape: (21, 21)
y_test shape: (21, 1)
A continuación se puede visualizar el arreglo para cada una de las matrices generadas para los datos de entrenamiento, validación y prueba para un \(\tau = 21\).
X_train_vola21_21
array([[0. , 0. , 0. , ..., 0. , 0. ,
0. ],
[0. , 0. , 0. , ..., 0. , 0. ,
0. ],
[0. , 0. , 0. , ..., 0. , 0. ,
0. ],
...,
[0.02240124, 0.02233188, 0.02234558, ..., 0.03059688, 0.03028503,
0.03091824],
[0.02233188, 0.02234558, 0.02057889, ..., 0.03028503, 0.03091824,
0.0308798 ],
[0.02234558, 0.02057889, 0.02079376, ..., 0.03091824, 0.0308798 ,
0.03087893]])
y_train_vola21_21
array([[0. ],
[0. ],
[0. ],
...,
[0.0308798 ],
[0.03087893],
[0.03087461]])
X_val_vola21_21
array([[0.02057889, 0.02079376, 0.02241768, 0.02339308, 0.02256937,
0.02258226, 0.02195136, 0.02562631, 0.0259223 , 0.02513092,
0.02521293, 0.03050996, 0.03048837, 0.03079356, 0.03091499,
0.03059688, 0.03028503, 0.03091824, 0.0308798 , 0.03087893,
0.03087461],
[0.02079376, 0.02241768, 0.02339308, 0.02256937, 0.02258226,
0.02195136, 0.02562631, 0.0259223 , 0.02513092, 0.02521293,
0.03050996, 0.03048837, 0.03079356, 0.03091499, 0.03059688,
0.03028503, 0.03091824, 0.0308798 , 0.03087893, 0.03087461,
0.0307535 ],
[0.02241768, 0.02339308, 0.02256937, 0.02258226, 0.02195136,
0.02562631, 0.0259223 , 0.02513092, 0.02521293, 0.03050996,
0.03048837, 0.03079356, 0.03091499, 0.03059688, 0.03028503,
0.03091824, 0.0308798 , 0.03087893, 0.03087461, 0.0307535 ,
0.03049742],
[0.02339308, 0.02256937, 0.02258226, 0.02195136, 0.02562631,
0.0259223 , 0.02513092, 0.02521293, 0.03050996, 0.03048837,
0.03079356, 0.03091499, 0.03059688, 0.03028503, 0.03091824,
0.0308798 , 0.03087893, 0.03087461, 0.0307535 , 0.03049742,
0.02901752],
[0.02256937, 0.02258226, 0.02195136, 0.02562631, 0.0259223 ,
0.02513092, 0.02521293, 0.03050996, 0.03048837, 0.03079356,
0.03091499, 0.03059688, 0.03028503, 0.03091824, 0.0308798 ,
0.03087893, 0.03087461, 0.0307535 , 0.03049742, 0.02901752,
0.02797885],
[0.02258226, 0.02195136, 0.02562631, 0.0259223 , 0.02513092,
0.02521293, 0.03050996, 0.03048837, 0.03079356, 0.03091499,
0.03059688, 0.03028503, 0.03091824, 0.0308798 , 0.03087893,
0.03087461, 0.0307535 , 0.03049742, 0.02901752, 0.02797885,
0.03014141],
[0.02195136, 0.02562631, 0.0259223 , 0.02513092, 0.02521293,
0.03050996, 0.03048837, 0.03079356, 0.03091499, 0.03059688,
0.03028503, 0.03091824, 0.0308798 , 0.03087893, 0.03087461,
0.0307535 , 0.03049742, 0.02901752, 0.02797885, 0.03014141,
0.0302081 ],
[0.02562631, 0.0259223 , 0.02513092, 0.02521293, 0.03050996,
0.03048837, 0.03079356, 0.03091499, 0.03059688, 0.03028503,
0.03091824, 0.0308798 , 0.03087893, 0.03087461, 0.0307535 ,
0.03049742, 0.02901752, 0.02797885, 0.03014141, 0.0302081 ,
0.0302079 ],
[0.0259223 , 0.02513092, 0.02521293, 0.03050996, 0.03048837,
0.03079356, 0.03091499, 0.03059688, 0.03028503, 0.03091824,
0.0308798 , 0.03087893, 0.03087461, 0.0307535 , 0.03049742,
0.02901752, 0.02797885, 0.03014141, 0.0302081 , 0.0302079 ,
0.02663401],
[0.02513092, 0.02521293, 0.03050996, 0.03048837, 0.03079356,
0.03091499, 0.03059688, 0.03028503, 0.03091824, 0.0308798 ,
0.03087893, 0.03087461, 0.0307535 , 0.03049742, 0.02901752,
0.02797885, 0.03014141, 0.0302081 , 0.0302079 , 0.02663401,
0.02645964],
[0.02521293, 0.03050996, 0.03048837, 0.03079356, 0.03091499,
0.03059688, 0.03028503, 0.03091824, 0.0308798 , 0.03087893,
0.03087461, 0.0307535 , 0.03049742, 0.02901752, 0.02797885,
0.03014141, 0.0302081 , 0.0302079 , 0.02663401, 0.02645964,
0.0262877 ],
[0.03050996, 0.03048837, 0.03079356, 0.03091499, 0.03059688,
0.03028503, 0.03091824, 0.0308798 , 0.03087893, 0.03087461,
0.0307535 , 0.03049742, 0.02901752, 0.02797885, 0.03014141,
0.0302081 , 0.0302079 , 0.02663401, 0.02645964, 0.0262877 ,
0.02650421],
[0.03048837, 0.03079356, 0.03091499, 0.03059688, 0.03028503,
0.03091824, 0.0308798 , 0.03087893, 0.03087461, 0.0307535 ,
0.03049742, 0.02901752, 0.02797885, 0.03014141, 0.0302081 ,
0.0302079 , 0.02663401, 0.02645964, 0.0262877 , 0.02650421,
0.02063035],
[0.03079356, 0.03091499, 0.03059688, 0.03028503, 0.03091824,
0.0308798 , 0.03087893, 0.03087461, 0.0307535 , 0.03049742,
0.02901752, 0.02797885, 0.03014141, 0.0302081 , 0.0302079 ,
0.02663401, 0.02645964, 0.0262877 , 0.02650421, 0.02063035,
0.02065898],
[0.03091499, 0.03059688, 0.03028503, 0.03091824, 0.0308798 ,
0.03087893, 0.03087461, 0.0307535 , 0.03049742, 0.02901752,
0.02797885, 0.03014141, 0.0302081 , 0.0302079 , 0.02663401,
0.02645964, 0.0262877 , 0.02650421, 0.02063035, 0.02065898,
0.01992985],
[0.03059688, 0.03028503, 0.03091824, 0.0308798 , 0.03087893,
0.03087461, 0.0307535 , 0.03049742, 0.02901752, 0.02797885,
0.03014141, 0.0302081 , 0.0302079 , 0.02663401, 0.02645964,
0.0262877 , 0.02650421, 0.02063035, 0.02065898, 0.01992985,
0.01954443],
[0.03028503, 0.03091824, 0.0308798 , 0.03087893, 0.03087461,
0.0307535 , 0.03049742, 0.02901752, 0.02797885, 0.03014141,
0.0302081 , 0.0302079 , 0.02663401, 0.02645964, 0.0262877 ,
0.02650421, 0.02063035, 0.02065898, 0.01992985, 0.01954443,
0.01936915],
[0.03091824, 0.0308798 , 0.03087893, 0.03087461, 0.0307535 ,
0.03049742, 0.02901752, 0.02797885, 0.03014141, 0.0302081 ,
0.0302079 , 0.02663401, 0.02645964, 0.0262877 , 0.02650421,
0.02063035, 0.02065898, 0.01992985, 0.01954443, 0.01936915,
0.02024194],
[0.0308798 , 0.03087893, 0.03087461, 0.0307535 , 0.03049742,
0.02901752, 0.02797885, 0.03014141, 0.0302081 , 0.0302079 ,
0.02663401, 0.02645964, 0.0262877 , 0.02650421, 0.02063035,
0.02065898, 0.01992985, 0.01954443, 0.01936915, 0.02024194,
0.01884434],
[0.03087893, 0.03087461, 0.0307535 , 0.03049742, 0.02901752,
0.02797885, 0.03014141, 0.0302081 , 0.0302079 , 0.02663401,
0.02645964, 0.0262877 , 0.02650421, 0.02063035, 0.02065898,
0.01992985, 0.01954443, 0.01936915, 0.02024194, 0.01884434,
0.0204051 ],
[0.03087461, 0.0307535 , 0.03049742, 0.02901752, 0.02797885,
0.03014141, 0.0302081 , 0.0302079 , 0.02663401, 0.02645964,
0.0262877 , 0.02650421, 0.02063035, 0.02065898, 0.01992985,
0.01954443, 0.01936915, 0.02024194, 0.01884434, 0.0204051 ,
0.02043112]])
y_val_vola21_21
array([[0.0307535 ],
[0.03049742],
[0.02901752],
[0.02797885],
[0.03014141],
[0.0302081 ],
[0.0302079 ],
[0.02663401],
[0.02645964],
[0.0262877 ],
[0.02650421],
[0.02063035],
[0.02065898],
[0.01992985],
[0.01954443],
[0.01936915],
[0.02024194],
[0.01884434],
[0.0204051 ],
[0.02043112],
[0.02033406]])
X_test_vola21_21
array([[0.0307535 , 0.03049742, 0.02901752, 0.02797885, 0.03014141,
0.0302081 , 0.0302079 , 0.02663401, 0.02645964, 0.0262877 ,
0.02650421, 0.02063035, 0.02065898, 0.01992985, 0.01954443,
0.01936915, 0.02024194, 0.01884434, 0.0204051 , 0.02043112,
0.02033406],
[0.03049742, 0.02901752, 0.02797885, 0.03014141, 0.0302081 ,
0.0302079 , 0.02663401, 0.02645964, 0.0262877 , 0.02650421,
0.02063035, 0.02065898, 0.01992985, 0.01954443, 0.01936915,
0.02024194, 0.01884434, 0.0204051 , 0.02043112, 0.02033406,
0.01663176],
[0.02901752, 0.02797885, 0.03014141, 0.0302081 , 0.0302079 ,
0.02663401, 0.02645964, 0.0262877 , 0.02650421, 0.02063035,
0.02065898, 0.01992985, 0.01954443, 0.01936915, 0.02024194,
0.01884434, 0.0204051 , 0.02043112, 0.02033406, 0.01663176,
0.01697907],
[0.02797885, 0.03014141, 0.0302081 , 0.0302079 , 0.02663401,
0.02645964, 0.0262877 , 0.02650421, 0.02063035, 0.02065898,
0.01992985, 0.01954443, 0.01936915, 0.02024194, 0.01884434,
0.0204051 , 0.02043112, 0.02033406, 0.01663176, 0.01697907,
0.01819758],
[0.03014141, 0.0302081 , 0.0302079 , 0.02663401, 0.02645964,
0.0262877 , 0.02650421, 0.02063035, 0.02065898, 0.01992985,
0.01954443, 0.01936915, 0.02024194, 0.01884434, 0.0204051 ,
0.02043112, 0.02033406, 0.01663176, 0.01697907, 0.01819758,
0.01797544],
[0.0302081 , 0.0302079 , 0.02663401, 0.02645964, 0.0262877 ,
0.02650421, 0.02063035, 0.02065898, 0.01992985, 0.01954443,
0.01936915, 0.02024194, 0.01884434, 0.0204051 , 0.02043112,
0.02033406, 0.01663176, 0.01697907, 0.01819758, 0.01797544,
0.01623343],
[0.0302079 , 0.02663401, 0.02645964, 0.0262877 , 0.02650421,
0.02063035, 0.02065898, 0.01992985, 0.01954443, 0.01936915,
0.02024194, 0.01884434, 0.0204051 , 0.02043112, 0.02033406,
0.01663176, 0.01697907, 0.01819758, 0.01797544, 0.01623343,
0.01680528],
[0.02663401, 0.02645964, 0.0262877 , 0.02650421, 0.02063035,
0.02065898, 0.01992985, 0.01954443, 0.01936915, 0.02024194,
0.01884434, 0.0204051 , 0.02043112, 0.02033406, 0.01663176,
0.01697907, 0.01819758, 0.01797544, 0.01623343, 0.01680528,
0.01658037],
[0.02645964, 0.0262877 , 0.02650421, 0.02063035, 0.02065898,
0.01992985, 0.01954443, 0.01936915, 0.02024194, 0.01884434,
0.0204051 , 0.02043112, 0.02033406, 0.01663176, 0.01697907,
0.01819758, 0.01797544, 0.01623343, 0.01680528, 0.01658037,
0.01631993],
[0.0262877 , 0.02650421, 0.02063035, 0.02065898, 0.01992985,
0.01954443, 0.01936915, 0.02024194, 0.01884434, 0.0204051 ,
0.02043112, 0.02033406, 0.01663176, 0.01697907, 0.01819758,
0.01797544, 0.01623343, 0.01680528, 0.01658037, 0.01631993,
0.0158569 ],
[0.02650421, 0.02063035, 0.02065898, 0.01992985, 0.01954443,
0.01936915, 0.02024194, 0.01884434, 0.0204051 , 0.02043112,
0.02033406, 0.01663176, 0.01697907, 0.01819758, 0.01797544,
0.01623343, 0.01680528, 0.01658037, 0.01631993, 0.0158569 ,
0.01581449],
[0.02063035, 0.02065898, 0.01992985, 0.01954443, 0.01936915,
0.02024194, 0.01884434, 0.0204051 , 0.02043112, 0.02033406,
0.01663176, 0.01697907, 0.01819758, 0.01797544, 0.01623343,
0.01680528, 0.01658037, 0.01631993, 0.0158569 , 0.01581449,
0.01638668],
[0.02065898, 0.01992985, 0.01954443, 0.01936915, 0.02024194,
0.01884434, 0.0204051 , 0.02043112, 0.02033406, 0.01663176,
0.01697907, 0.01819758, 0.01797544, 0.01623343, 0.01680528,
0.01658037, 0.01631993, 0.0158569 , 0.01581449, 0.01638668,
0.01690949],
[0.01992985, 0.01954443, 0.01936915, 0.02024194, 0.01884434,
0.0204051 , 0.02043112, 0.02033406, 0.01663176, 0.01697907,
0.01819758, 0.01797544, 0.01623343, 0.01680528, 0.01658037,
0.01631993, 0.0158569 , 0.01581449, 0.01638668, 0.01690949,
0.01676634],
[0.01954443, 0.01936915, 0.02024194, 0.01884434, 0.0204051 ,
0.02043112, 0.02033406, 0.01663176, 0.01697907, 0.01819758,
0.01797544, 0.01623343, 0.01680528, 0.01658037, 0.01631993,
0.0158569 , 0.01581449, 0.01638668, 0.01690949, 0.01676634,
0.01627876],
[0.01936915, 0.02024194, 0.01884434, 0.0204051 , 0.02043112,
0.02033406, 0.01663176, 0.01697907, 0.01819758, 0.01797544,
0.01623343, 0.01680528, 0.01658037, 0.01631993, 0.0158569 ,
0.01581449, 0.01638668, 0.01690949, 0.01676634, 0.01627876,
0.01881878],
[0.02024194, 0.01884434, 0.0204051 , 0.02043112, 0.02033406,
0.01663176, 0.01697907, 0.01819758, 0.01797544, 0.01623343,
0.01680528, 0.01658037, 0.01631993, 0.0158569 , 0.01581449,
0.01638668, 0.01690949, 0.01676634, 0.01627876, 0.01881878,
0.020301 ],
[0.01884434, 0.0204051 , 0.02043112, 0.02033406, 0.01663176,
0.01697907, 0.01819758, 0.01797544, 0.01623343, 0.01680528,
0.01658037, 0.01631993, 0.0158569 , 0.01581449, 0.01638668,
0.01690949, 0.01676634, 0.01627876, 0.01881878, 0.020301 ,
0.02682442],
[0.0204051 , 0.02043112, 0.02033406, 0.01663176, 0.01697907,
0.01819758, 0.01797544, 0.01623343, 0.01680528, 0.01658037,
0.01631993, 0.0158569 , 0.01581449, 0.01638668, 0.01690949,
0.01676634, 0.01627876, 0.01881878, 0.020301 , 0.02682442,
0.02801343],
[0.02043112, 0.02033406, 0.01663176, 0.01697907, 0.01819758,
0.01797544, 0.01623343, 0.01680528, 0.01658037, 0.01631993,
0.0158569 , 0.01581449, 0.01638668, 0.01690949, 0.01676634,
0.01627876, 0.01881878, 0.020301 , 0.02682442, 0.02801343,
0.02742416],
[0.02033406, 0.01663176, 0.01697907, 0.01819758, 0.01797544,
0.01623343, 0.01680528, 0.01658037, 0.01631993, 0.0158569 ,
0.01581449, 0.01638668, 0.01690949, 0.01676634, 0.01627876,
0.01881878, 0.020301 , 0.02682442, 0.02801343, 0.02742416,
0.02777887]])
y_test_vola21_21
array([[0.01663176],
[0.01697907],
[0.01819758],
[0.01797544],
[0.01623343],
[0.01680528],
[0.01658037],
[0.01631993],
[0.0158569 ],
[0.01581449],
[0.01638668],
[0.01690949],
[0.01676634],
[0.01627876],
[0.01881878],
[0.020301 ],
[0.02682442],
[0.02801343],
[0.02742416],
[0.02777887],
[0.02780064]])
Horizonte de 28 días (\(\tau=28\))
# Build the train/validation/test windows for the 21-day volatility series
# with a forecasting horizon of tau = 28.
X_train_vola21_28, y_train_vola21_28, X_val_vola21_28, y_val_vola21_28, X_test_vola21_28, y_test_vola21_28 = create_time_series_datasets(df_1_st,'Volatilidad_21',tau28)
# Report the dimensions of every split in one pass.
for _name, _arr in (("X_train", X_train_vola21_28), ("y_train", y_train_vola21_28),
                    ("X_val", X_val_vola21_28), ("y_val", y_val_vola21_28),
                    ("X_test", X_test_vola21_28), ("y_test", y_test_vola21_28)):
    print(f"{_name} shape:", _arr.shape)
X_train shape: (4887, 28)
y_train shape: (4887, 1)
X_val shape: (28, 28)
y_val shape: (28, 1)
X_test shape: (28, 28)
y_test shape: (28, 1)
A continuación se puede visualizar el arreglo para cada una de las matrices generadas para los datos de entrenamiento, validación y prueba para un \(\tau = 28\).
X_train_vola21_28
array([[0. , 0. , 0. , ..., 0. , 0. ,
0. ],
[0. , 0. , 0. , ..., 0. , 0. ,
0. ],
[0. , 0. , 0. , ..., 0. , 0. ,
0. ],
...,
[0.02535099, 0.02560046, 0.0256535 , ..., 0.02196357, 0.02250512,
0.02280028],
[0.02560046, 0.0256535 , 0.02714042, ..., 0.02250512, 0.02280028,
0.02240124],
[0.0256535 , 0.02714042, 0.02759058, ..., 0.02280028, 0.02240124,
0.02233188]])
y_train_vola21_28
array([[0. ],
[0. ],
[0. ],
...,
[0.02240124],
[0.02233188],
[0.02234558]])
X_val_vola21_28
array([[0.02714042, 0.02759058, 0.02478046, 0.02214729, 0.02228425,
0.02259596, 0.02245239, 0.02664624, 0.0241948 , 0.02345985,
0.0233654 , 0.02439022, 0.02438403, 0.02490249, 0.02541036,
0.02556494, 0.02623063, 0.02611958, 0.02579728, 0.02573722,
0.02610021, 0.02396126, 0.02196357, 0.02250512, 0.02280028,
0.02240124, 0.02233188, 0.02234558],
[0.02759058, 0.02478046, 0.02214729, 0.02228425, 0.02259596,
0.02245239, 0.02664624, 0.0241948 , 0.02345985, 0.0233654 ,
0.02439022, 0.02438403, 0.02490249, 0.02541036, 0.02556494,
0.02623063, 0.02611958, 0.02579728, 0.02573722, 0.02610021,
0.02396126, 0.02196357, 0.02250512, 0.02280028, 0.02240124,
0.02233188, 0.02234558, 0.02057889],
[0.02478046, 0.02214729, 0.02228425, 0.02259596, 0.02245239,
0.02664624, 0.0241948 , 0.02345985, 0.0233654 , 0.02439022,
0.02438403, 0.02490249, 0.02541036, 0.02556494, 0.02623063,
0.02611958, 0.02579728, 0.02573722, 0.02610021, 0.02396126,
0.02196357, 0.02250512, 0.02280028, 0.02240124, 0.02233188,
0.02234558, 0.02057889, 0.02079376],
[0.02214729, 0.02228425, 0.02259596, 0.02245239, 0.02664624,
0.0241948 , 0.02345985, 0.0233654 , 0.02439022, 0.02438403,
0.02490249, 0.02541036, 0.02556494, 0.02623063, 0.02611958,
0.02579728, 0.02573722, 0.02610021, 0.02396126, 0.02196357,
0.02250512, 0.02280028, 0.02240124, 0.02233188, 0.02234558,
0.02057889, 0.02079376, 0.02241768],
[0.02228425, 0.02259596, 0.02245239, 0.02664624, 0.0241948 ,
0.02345985, 0.0233654 , 0.02439022, 0.02438403, 0.02490249,
0.02541036, 0.02556494, 0.02623063, 0.02611958, 0.02579728,
0.02573722, 0.02610021, 0.02396126, 0.02196357, 0.02250512,
0.02280028, 0.02240124, 0.02233188, 0.02234558, 0.02057889,
0.02079376, 0.02241768, 0.02339308],
[0.02259596, 0.02245239, 0.02664624, 0.0241948 , 0.02345985,
0.0233654 , 0.02439022, 0.02438403, 0.02490249, 0.02541036,
0.02556494, 0.02623063, 0.02611958, 0.02579728, 0.02573722,
0.02610021, 0.02396126, 0.02196357, 0.02250512, 0.02280028,
0.02240124, 0.02233188, 0.02234558, 0.02057889, 0.02079376,
0.02241768, 0.02339308, 0.02256937],
[0.02245239, 0.02664624, 0.0241948 , 0.02345985, 0.0233654 ,
0.02439022, 0.02438403, 0.02490249, 0.02541036, 0.02556494,
0.02623063, 0.02611958, 0.02579728, 0.02573722, 0.02610021,
0.02396126, 0.02196357, 0.02250512, 0.02280028, 0.02240124,
0.02233188, 0.02234558, 0.02057889, 0.02079376, 0.02241768,
0.02339308, 0.02256937, 0.02258226],
[0.02664624, 0.0241948 , 0.02345985, 0.0233654 , 0.02439022,
0.02438403, 0.02490249, 0.02541036, 0.02556494, 0.02623063,
0.02611958, 0.02579728, 0.02573722, 0.02610021, 0.02396126,
0.02196357, 0.02250512, 0.02280028, 0.02240124, 0.02233188,
0.02234558, 0.02057889, 0.02079376, 0.02241768, 0.02339308,
0.02256937, 0.02258226, 0.02195136],
[0.0241948 , 0.02345985, 0.0233654 , 0.02439022, 0.02438403,
0.02490249, 0.02541036, 0.02556494, 0.02623063, 0.02611958,
0.02579728, 0.02573722, 0.02610021, 0.02396126, 0.02196357,
0.02250512, 0.02280028, 0.02240124, 0.02233188, 0.02234558,
0.02057889, 0.02079376, 0.02241768, 0.02339308, 0.02256937,
0.02258226, 0.02195136, 0.02562631],
[0.02345985, 0.0233654 , 0.02439022, 0.02438403, 0.02490249,
0.02541036, 0.02556494, 0.02623063, 0.02611958, 0.02579728,
0.02573722, 0.02610021, 0.02396126, 0.02196357, 0.02250512,
0.02280028, 0.02240124, 0.02233188, 0.02234558, 0.02057889,
0.02079376, 0.02241768, 0.02339308, 0.02256937, 0.02258226,
0.02195136, 0.02562631, 0.0259223 ],
[0.0233654 , 0.02439022, 0.02438403, 0.02490249, 0.02541036,
0.02556494, 0.02623063, 0.02611958, 0.02579728, 0.02573722,
0.02610021, 0.02396126, 0.02196357, 0.02250512, 0.02280028,
0.02240124, 0.02233188, 0.02234558, 0.02057889, 0.02079376,
0.02241768, 0.02339308, 0.02256937, 0.02258226, 0.02195136,
0.02562631, 0.0259223 , 0.02513092],
[0.02439022, 0.02438403, 0.02490249, 0.02541036, 0.02556494,
0.02623063, 0.02611958, 0.02579728, 0.02573722, 0.02610021,
0.02396126, 0.02196357, 0.02250512, 0.02280028, 0.02240124,
0.02233188, 0.02234558, 0.02057889, 0.02079376, 0.02241768,
0.02339308, 0.02256937, 0.02258226, 0.02195136, 0.02562631,
0.0259223 , 0.02513092, 0.02521293],
[0.02438403, 0.02490249, 0.02541036, 0.02556494, 0.02623063,
0.02611958, 0.02579728, 0.02573722, 0.02610021, 0.02396126,
0.02196357, 0.02250512, 0.02280028, 0.02240124, 0.02233188,
0.02234558, 0.02057889, 0.02079376, 0.02241768, 0.02339308,
0.02256937, 0.02258226, 0.02195136, 0.02562631, 0.0259223 ,
0.02513092, 0.02521293, 0.03050996],
[0.02490249, 0.02541036, 0.02556494, 0.02623063, 0.02611958,
0.02579728, 0.02573722, 0.02610021, 0.02396126, 0.02196357,
0.02250512, 0.02280028, 0.02240124, 0.02233188, 0.02234558,
0.02057889, 0.02079376, 0.02241768, 0.02339308, 0.02256937,
0.02258226, 0.02195136, 0.02562631, 0.0259223 , 0.02513092,
0.02521293, 0.03050996, 0.03048837],
[0.02541036, 0.02556494, 0.02623063, 0.02611958, 0.02579728,
0.02573722, 0.02610021, 0.02396126, 0.02196357, 0.02250512,
0.02280028, 0.02240124, 0.02233188, 0.02234558, 0.02057889,
0.02079376, 0.02241768, 0.02339308, 0.02256937, 0.02258226,
0.02195136, 0.02562631, 0.0259223 , 0.02513092, 0.02521293,
0.03050996, 0.03048837, 0.03079356],
[0.02556494, 0.02623063, 0.02611958, 0.02579728, 0.02573722,
0.02610021, 0.02396126, 0.02196357, 0.02250512, 0.02280028,
0.02240124, 0.02233188, 0.02234558, 0.02057889, 0.02079376,
0.02241768, 0.02339308, 0.02256937, 0.02258226, 0.02195136,
0.02562631, 0.0259223 , 0.02513092, 0.02521293, 0.03050996,
0.03048837, 0.03079356, 0.03091499],
[0.02623063, 0.02611958, 0.02579728, 0.02573722, 0.02610021,
0.02396126, 0.02196357, 0.02250512, 0.02280028, 0.02240124,
0.02233188, 0.02234558, 0.02057889, 0.02079376, 0.02241768,
0.02339308, 0.02256937, 0.02258226, 0.02195136, 0.02562631,
0.0259223 , 0.02513092, 0.02521293, 0.03050996, 0.03048837,
0.03079356, 0.03091499, 0.03059688],
[0.02611958, 0.02579728, 0.02573722, 0.02610021, 0.02396126,
0.02196357, 0.02250512, 0.02280028, 0.02240124, 0.02233188,
0.02234558, 0.02057889, 0.02079376, 0.02241768, 0.02339308,
0.02256937, 0.02258226, 0.02195136, 0.02562631, 0.0259223 ,
0.02513092, 0.02521293, 0.03050996, 0.03048837, 0.03079356,
0.03091499, 0.03059688, 0.03028503],
[0.02579728, 0.02573722, 0.02610021, 0.02396126, 0.02196357,
0.02250512, 0.02280028, 0.02240124, 0.02233188, 0.02234558,
0.02057889, 0.02079376, 0.02241768, 0.02339308, 0.02256937,
0.02258226, 0.02195136, 0.02562631, 0.0259223 , 0.02513092,
0.02521293, 0.03050996, 0.03048837, 0.03079356, 0.03091499,
0.03059688, 0.03028503, 0.03091824],
[0.02573722, 0.02610021, 0.02396126, 0.02196357, 0.02250512,
0.02280028, 0.02240124, 0.02233188, 0.02234558, 0.02057889,
0.02079376, 0.02241768, 0.02339308, 0.02256937, 0.02258226,
0.02195136, 0.02562631, 0.0259223 , 0.02513092, 0.02521293,
0.03050996, 0.03048837, 0.03079356, 0.03091499, 0.03059688,
0.03028503, 0.03091824, 0.0308798 ],
[0.02610021, 0.02396126, 0.02196357, 0.02250512, 0.02280028,
0.02240124, 0.02233188, 0.02234558, 0.02057889, 0.02079376,
0.02241768, 0.02339308, 0.02256937, 0.02258226, 0.02195136,
0.02562631, 0.0259223 , 0.02513092, 0.02521293, 0.03050996,
0.03048837, 0.03079356, 0.03091499, 0.03059688, 0.03028503,
0.03091824, 0.0308798 , 0.03087893],
[0.02396126, 0.02196357, 0.02250512, 0.02280028, 0.02240124,
0.02233188, 0.02234558, 0.02057889, 0.02079376, 0.02241768,
0.02339308, 0.02256937, 0.02258226, 0.02195136, 0.02562631,
0.0259223 , 0.02513092, 0.02521293, 0.03050996, 0.03048837,
0.03079356, 0.03091499, 0.03059688, 0.03028503, 0.03091824,
0.0308798 , 0.03087893, 0.03087461],
[0.02196357, 0.02250512, 0.02280028, 0.02240124, 0.02233188,
0.02234558, 0.02057889, 0.02079376, 0.02241768, 0.02339308,
0.02256937, 0.02258226, 0.02195136, 0.02562631, 0.0259223 ,
0.02513092, 0.02521293, 0.03050996, 0.03048837, 0.03079356,
0.03091499, 0.03059688, 0.03028503, 0.03091824, 0.0308798 ,
0.03087893, 0.03087461, 0.0307535 ],
[0.02250512, 0.02280028, 0.02240124, 0.02233188, 0.02234558,
0.02057889, 0.02079376, 0.02241768, 0.02339308, 0.02256937,
0.02258226, 0.02195136, 0.02562631, 0.0259223 , 0.02513092,
0.02521293, 0.03050996, 0.03048837, 0.03079356, 0.03091499,
0.03059688, 0.03028503, 0.03091824, 0.0308798 , 0.03087893,
0.03087461, 0.0307535 , 0.03049742],
[0.02280028, 0.02240124, 0.02233188, 0.02234558, 0.02057889,
0.02079376, 0.02241768, 0.02339308, 0.02256937, 0.02258226,
0.02195136, 0.02562631, 0.0259223 , 0.02513092, 0.02521293,
0.03050996, 0.03048837, 0.03079356, 0.03091499, 0.03059688,
0.03028503, 0.03091824, 0.0308798 , 0.03087893, 0.03087461,
0.0307535 , 0.03049742, 0.02901752],
[0.02240124, 0.02233188, 0.02234558, 0.02057889, 0.02079376,
0.02241768, 0.02339308, 0.02256937, 0.02258226, 0.02195136,
0.02562631, 0.0259223 , 0.02513092, 0.02521293, 0.03050996,
0.03048837, 0.03079356, 0.03091499, 0.03059688, 0.03028503,
0.03091824, 0.0308798 , 0.03087893, 0.03087461, 0.0307535 ,
0.03049742, 0.02901752, 0.02797885],
[0.02233188, 0.02234558, 0.02057889, 0.02079376, 0.02241768,
0.02339308, 0.02256937, 0.02258226, 0.02195136, 0.02562631,
0.0259223 , 0.02513092, 0.02521293, 0.03050996, 0.03048837,
0.03079356, 0.03091499, 0.03059688, 0.03028503, 0.03091824,
0.0308798 , 0.03087893, 0.03087461, 0.0307535 , 0.03049742,
0.02901752, 0.02797885, 0.03014141],
[0.02234558, 0.02057889, 0.02079376, 0.02241768, 0.02339308,
0.02256937, 0.02258226, 0.02195136, 0.02562631, 0.0259223 ,
0.02513092, 0.02521293, 0.03050996, 0.03048837, 0.03079356,
0.03091499, 0.03059688, 0.03028503, 0.03091824, 0.0308798 ,
0.03087893, 0.03087461, 0.0307535 , 0.03049742, 0.02901752,
0.02797885, 0.03014141, 0.0302081 ]])
y_val_vola21_28
array([[0.02057889],
[0.02079376],
[0.02241768],
[0.02339308],
[0.02256937],
[0.02258226],
[0.02195136],
[0.02562631],
[0.0259223 ],
[0.02513092],
[0.02521293],
[0.03050996],
[0.03048837],
[0.03079356],
[0.03091499],
[0.03059688],
[0.03028503],
[0.03091824],
[0.0308798 ],
[0.03087893],
[0.03087461],
[0.0307535 ],
[0.03049742],
[0.02901752],
[0.02797885],
[0.03014141],
[0.0302081 ],
[0.0302079 ]])
X_test_vola21_28
array([[0.02057889, 0.02079376, 0.02241768, 0.02339308, 0.02256937,
0.02258226, 0.02195136, 0.02562631, 0.0259223 , 0.02513092,
0.02521293, 0.03050996, 0.03048837, 0.03079356, 0.03091499,
0.03059688, 0.03028503, 0.03091824, 0.0308798 , 0.03087893,
0.03087461, 0.0307535 , 0.03049742, 0.02901752, 0.02797885,
0.03014141, 0.0302081 , 0.0302079 ],
[0.02079376, 0.02241768, 0.02339308, 0.02256937, 0.02258226,
0.02195136, 0.02562631, 0.0259223 , 0.02513092, 0.02521293,
0.03050996, 0.03048837, 0.03079356, 0.03091499, 0.03059688,
0.03028503, 0.03091824, 0.0308798 , 0.03087893, 0.03087461,
0.0307535 , 0.03049742, 0.02901752, 0.02797885, 0.03014141,
0.0302081 , 0.0302079 , 0.02663401],
[0.02241768, 0.02339308, 0.02256937, 0.02258226, 0.02195136,
0.02562631, 0.0259223 , 0.02513092, 0.02521293, 0.03050996,
0.03048837, 0.03079356, 0.03091499, 0.03059688, 0.03028503,
0.03091824, 0.0308798 , 0.03087893, 0.03087461, 0.0307535 ,
0.03049742, 0.02901752, 0.02797885, 0.03014141, 0.0302081 ,
0.0302079 , 0.02663401, 0.02645964],
[0.02339308, 0.02256937, 0.02258226, 0.02195136, 0.02562631,
0.0259223 , 0.02513092, 0.02521293, 0.03050996, 0.03048837,
0.03079356, 0.03091499, 0.03059688, 0.03028503, 0.03091824,
0.0308798 , 0.03087893, 0.03087461, 0.0307535 , 0.03049742,
0.02901752, 0.02797885, 0.03014141, 0.0302081 , 0.0302079 ,
0.02663401, 0.02645964, 0.0262877 ],
[0.02256937, 0.02258226, 0.02195136, 0.02562631, 0.0259223 ,
0.02513092, 0.02521293, 0.03050996, 0.03048837, 0.03079356,
0.03091499, 0.03059688, 0.03028503, 0.03091824, 0.0308798 ,
0.03087893, 0.03087461, 0.0307535 , 0.03049742, 0.02901752,
0.02797885, 0.03014141, 0.0302081 , 0.0302079 , 0.02663401,
0.02645964, 0.0262877 , 0.02650421],
[0.02258226, 0.02195136, 0.02562631, 0.0259223 , 0.02513092,
0.02521293, 0.03050996, 0.03048837, 0.03079356, 0.03091499,
0.03059688, 0.03028503, 0.03091824, 0.0308798 , 0.03087893,
0.03087461, 0.0307535 , 0.03049742, 0.02901752, 0.02797885,
0.03014141, 0.0302081 , 0.0302079 , 0.02663401, 0.02645964,
0.0262877 , 0.02650421, 0.02063035],
[0.02195136, 0.02562631, 0.0259223 , 0.02513092, 0.02521293,
0.03050996, 0.03048837, 0.03079356, 0.03091499, 0.03059688,
0.03028503, 0.03091824, 0.0308798 , 0.03087893, 0.03087461,
0.0307535 , 0.03049742, 0.02901752, 0.02797885, 0.03014141,
0.0302081 , 0.0302079 , 0.02663401, 0.02645964, 0.0262877 ,
0.02650421, 0.02063035, 0.02065898],
[0.02562631, 0.0259223 , 0.02513092, 0.02521293, 0.03050996,
0.03048837, 0.03079356, 0.03091499, 0.03059688, 0.03028503,
0.03091824, 0.0308798 , 0.03087893, 0.03087461, 0.0307535 ,
0.03049742, 0.02901752, 0.02797885, 0.03014141, 0.0302081 ,
0.0302079 , 0.02663401, 0.02645964, 0.0262877 , 0.02650421,
0.02063035, 0.02065898, 0.01992985],
[0.0259223 , 0.02513092, 0.02521293, 0.03050996, 0.03048837,
0.03079356, 0.03091499, 0.03059688, 0.03028503, 0.03091824,
0.0308798 , 0.03087893, 0.03087461, 0.0307535 , 0.03049742,
0.02901752, 0.02797885, 0.03014141, 0.0302081 , 0.0302079 ,
0.02663401, 0.02645964, 0.0262877 , 0.02650421, 0.02063035,
0.02065898, 0.01992985, 0.01954443],
[0.02513092, 0.02521293, 0.03050996, 0.03048837, 0.03079356,
0.03091499, 0.03059688, 0.03028503, 0.03091824, 0.0308798 ,
0.03087893, 0.03087461, 0.0307535 , 0.03049742, 0.02901752,
0.02797885, 0.03014141, 0.0302081 , 0.0302079 , 0.02663401,
0.02645964, 0.0262877 , 0.02650421, 0.02063035, 0.02065898,
0.01992985, 0.01954443, 0.01936915],
[0.02521293, 0.03050996, 0.03048837, 0.03079356, 0.03091499,
0.03059688, 0.03028503, 0.03091824, 0.0308798 , 0.03087893,
0.03087461, 0.0307535 , 0.03049742, 0.02901752, 0.02797885,
0.03014141, 0.0302081 , 0.0302079 , 0.02663401, 0.02645964,
0.0262877 , 0.02650421, 0.02063035, 0.02065898, 0.01992985,
0.01954443, 0.01936915, 0.02024194],
[0.03050996, 0.03048837, 0.03079356, 0.03091499, 0.03059688,
0.03028503, 0.03091824, 0.0308798 , 0.03087893, 0.03087461,
0.0307535 , 0.03049742, 0.02901752, 0.02797885, 0.03014141,
0.0302081 , 0.0302079 , 0.02663401, 0.02645964, 0.0262877 ,
0.02650421, 0.02063035, 0.02065898, 0.01992985, 0.01954443,
0.01936915, 0.02024194, 0.01884434],
[0.03048837, 0.03079356, 0.03091499, 0.03059688, 0.03028503,
0.03091824, 0.0308798 , 0.03087893, 0.03087461, 0.0307535 ,
0.03049742, 0.02901752, 0.02797885, 0.03014141, 0.0302081 ,
0.0302079 , 0.02663401, 0.02645964, 0.0262877 , 0.02650421,
0.02063035, 0.02065898, 0.01992985, 0.01954443, 0.01936915,
0.02024194, 0.01884434, 0.0204051 ],
[0.03079356, 0.03091499, 0.03059688, 0.03028503, 0.03091824,
0.0308798 , 0.03087893, 0.03087461, 0.0307535 , 0.03049742,
0.02901752, 0.02797885, 0.03014141, 0.0302081 , 0.0302079 ,
0.02663401, 0.02645964, 0.0262877 , 0.02650421, 0.02063035,
0.02065898, 0.01992985, 0.01954443, 0.01936915, 0.02024194,
0.01884434, 0.0204051 , 0.02043112],
[0.03091499, 0.03059688, 0.03028503, 0.03091824, 0.0308798 ,
0.03087893, 0.03087461, 0.0307535 , 0.03049742, 0.02901752,
0.02797885, 0.03014141, 0.0302081 , 0.0302079 , 0.02663401,
0.02645964, 0.0262877 , 0.02650421, 0.02063035, 0.02065898,
0.01992985, 0.01954443, 0.01936915, 0.02024194, 0.01884434,
0.0204051 , 0.02043112, 0.02033406],
[0.03059688, 0.03028503, 0.03091824, 0.0308798 , 0.03087893,
0.03087461, 0.0307535 , 0.03049742, 0.02901752, 0.02797885,
0.03014141, 0.0302081 , 0.0302079 , 0.02663401, 0.02645964,
0.0262877 , 0.02650421, 0.02063035, 0.02065898, 0.01992985,
0.01954443, 0.01936915, 0.02024194, 0.01884434, 0.0204051 ,
0.02043112, 0.02033406, 0.01663176],
[0.03028503, 0.03091824, 0.0308798 , 0.03087893, 0.03087461,
0.0307535 , 0.03049742, 0.02901752, 0.02797885, 0.03014141,
0.0302081 , 0.0302079 , 0.02663401, 0.02645964, 0.0262877 ,
0.02650421, 0.02063035, 0.02065898, 0.01992985, 0.01954443,
0.01936915, 0.02024194, 0.01884434, 0.0204051 , 0.02043112,
0.02033406, 0.01663176, 0.01697907],
[0.03091824, 0.0308798 , 0.03087893, 0.03087461, 0.0307535 ,
0.03049742, 0.02901752, 0.02797885, 0.03014141, 0.0302081 ,
0.0302079 , 0.02663401, 0.02645964, 0.0262877 , 0.02650421,
0.02063035, 0.02065898, 0.01992985, 0.01954443, 0.01936915,
0.02024194, 0.01884434, 0.0204051 , 0.02043112, 0.02033406,
0.01663176, 0.01697907, 0.01819758],
[0.0308798 , 0.03087893, 0.03087461, 0.0307535 , 0.03049742,
0.02901752, 0.02797885, 0.03014141, 0.0302081 , 0.0302079 ,
0.02663401, 0.02645964, 0.0262877 , 0.02650421, 0.02063035,
0.02065898, 0.01992985, 0.01954443, 0.01936915, 0.02024194,
0.01884434, 0.0204051 , 0.02043112, 0.02033406, 0.01663176,
0.01697907, 0.01819758, 0.01797544],
[0.03087893, 0.03087461, 0.0307535 , 0.03049742, 0.02901752,
0.02797885, 0.03014141, 0.0302081 , 0.0302079 , 0.02663401,
0.02645964, 0.0262877 , 0.02650421, 0.02063035, 0.02065898,
0.01992985, 0.01954443, 0.01936915, 0.02024194, 0.01884434,
0.0204051 , 0.02043112, 0.02033406, 0.01663176, 0.01697907,
0.01819758, 0.01797544, 0.01623343],
[0.03087461, 0.0307535 , 0.03049742, 0.02901752, 0.02797885,
0.03014141, 0.0302081 , 0.0302079 , 0.02663401, 0.02645964,
0.0262877 , 0.02650421, 0.02063035, 0.02065898, 0.01992985,
0.01954443, 0.01936915, 0.02024194, 0.01884434, 0.0204051 ,
0.02043112, 0.02033406, 0.01663176, 0.01697907, 0.01819758,
0.01797544, 0.01623343, 0.01680528],
[0.0307535 , 0.03049742, 0.02901752, 0.02797885, 0.03014141,
0.0302081 , 0.0302079 , 0.02663401, 0.02645964, 0.0262877 ,
0.02650421, 0.02063035, 0.02065898, 0.01992985, 0.01954443,
0.01936915, 0.02024194, 0.01884434, 0.0204051 , 0.02043112,
0.02033406, 0.01663176, 0.01697907, 0.01819758, 0.01797544,
0.01623343, 0.01680528, 0.01658037],
[0.03049742, 0.02901752, 0.02797885, 0.03014141, 0.0302081 ,
0.0302079 , 0.02663401, 0.02645964, 0.0262877 , 0.02650421,
0.02063035, 0.02065898, 0.01992985, 0.01954443, 0.01936915,
0.02024194, 0.01884434, 0.0204051 , 0.02043112, 0.02033406,
0.01663176, 0.01697907, 0.01819758, 0.01797544, 0.01623343,
0.01680528, 0.01658037, 0.01631993],
[0.02901752, 0.02797885, 0.03014141, 0.0302081 , 0.0302079 ,
0.02663401, 0.02645964, 0.0262877 , 0.02650421, 0.02063035,
0.02065898, 0.01992985, 0.01954443, 0.01936915, 0.02024194,
0.01884434, 0.0204051 , 0.02043112, 0.02033406, 0.01663176,
0.01697907, 0.01819758, 0.01797544, 0.01623343, 0.01680528,
0.01658037, 0.01631993, 0.0158569 ],
[0.02797885, 0.03014141, 0.0302081 , 0.0302079 , 0.02663401,
0.02645964, 0.0262877 , 0.02650421, 0.02063035, 0.02065898,
0.01992985, 0.01954443, 0.01936915, 0.02024194, 0.01884434,
0.0204051 , 0.02043112, 0.02033406, 0.01663176, 0.01697907,
0.01819758, 0.01797544, 0.01623343, 0.01680528, 0.01658037,
0.01631993, 0.0158569 , 0.01581449],
[0.03014141, 0.0302081 , 0.0302079 , 0.02663401, 0.02645964,
0.0262877 , 0.02650421, 0.02063035, 0.02065898, 0.01992985,
0.01954443, 0.01936915, 0.02024194, 0.01884434, 0.0204051 ,
0.02043112, 0.02033406, 0.01663176, 0.01697907, 0.01819758,
0.01797544, 0.01623343, 0.01680528, 0.01658037, 0.01631993,
0.0158569 , 0.01581449, 0.01638668],
[0.0302081 , 0.0302079 , 0.02663401, 0.02645964, 0.0262877 ,
0.02650421, 0.02063035, 0.02065898, 0.01992985, 0.01954443,
0.01936915, 0.02024194, 0.01884434, 0.0204051 , 0.02043112,
0.02033406, 0.01663176, 0.01697907, 0.01819758, 0.01797544,
0.01623343, 0.01680528, 0.01658037, 0.01631993, 0.0158569 ,
0.01581449, 0.01638668, 0.01690949],
[0.0302079 , 0.02663401, 0.02645964, 0.0262877 , 0.02650421,
0.02063035, 0.02065898, 0.01992985, 0.01954443, 0.01936915,
0.02024194, 0.01884434, 0.0204051 , 0.02043112, 0.02033406,
0.01663176, 0.01697907, 0.01819758, 0.01797544, 0.01623343,
0.01680528, 0.01658037, 0.01631993, 0.0158569 , 0.01581449,
0.01638668, 0.01690949, 0.01676634]])
y_test_vola21_28
array([[0.02663401],
[0.02645964],
[0.0262877 ],
[0.02650421],
[0.02063035],
[0.02065898],
[0.01992985],
[0.01954443],
[0.01936915],
[0.02024194],
[0.01884434],
[0.0204051 ],
[0.02043112],
[0.02033406],
[0.01663176],
[0.01697907],
[0.01819758],
[0.01797544],
[0.01623343],
[0.01680528],
[0.01658037],
[0.01631993],
[0.0158569 ],
[0.01581449],
[0.01638668],
[0.01690949],
[0.01676634],
[0.01627876]])
\(\omega = 28\)
Horizonte de 7 días (\(\tau=7\))
# Build the train / validation / test splits for the 28-day volatility series
# with a 7-day horizon (tau = 7), then report the shape of every resulting array.
X_train_vola28_7, y_train_vola28_7, X_val_vola28_7, y_val_vola28_7, X_test_vola28_7, y_test_vola28_7 = create_time_series_datasets(df_1_st,'Volatilidad_28',tau7)
# Report dimensions of each split
for _label, _arr in (("X_train", X_train_vola28_7), ("y_train", y_train_vola28_7),
                     ("X_val", X_val_vola28_7), ("y_val", y_val_vola28_7),
                     ("X_test", X_test_vola28_7), ("y_test", y_test_vola28_7)):
    print(f"{_label} shape:", _arr.shape)
X_train shape: (4971, 7)
y_train shape: (4971, 1)
X_val shape: (7, 7)
y_val shape: (7, 1)
X_test shape: (7, 7)
y_test shape: (7, 1)
A continuación se puede visualizar el arreglo para cada una de las matrices generadas para los datos de entrenamiento, validación y prueba para un \(\tau = 7\).
X_train_vola28_7
array([[0. , 0. , 0. , ..., 0. , 0. ,
0. ],
[0. , 0. , 0. , ..., 0. , 0. ,
0. ],
[0. , 0. , 0. , ..., 0. , 0. ,
0. ],
...,
[0.01594814, 0.01604526, 0.01596223, ..., 0.01883111, 0.0243227 ,
0.02519831],
[0.01604526, 0.01596223, 0.01770891, ..., 0.0243227 , 0.02519831,
0.02515081],
[0.01596223, 0.01770891, 0.01883111, ..., 0.02519831, 0.02515081,
0.02520881]])
y_train_vola28_7
array([[0. ],
[0. ],
[0. ],
...,
[0.02515081],
[0.02520881],
[0.02480357]])
X_val_vola28_7
array([[0.01770891, 0.01883111, 0.0243227 , 0.02519831, 0.02515081,
0.02520881, 0.02480357],
[0.01883111, 0.0243227 , 0.02519831, 0.02515081, 0.02520881,
0.02480357, 0.02770477],
[0.0243227 , 0.02519831, 0.02515081, 0.02520881, 0.02480357,
0.02770477, 0.03181078],
[0.02519831, 0.02515081, 0.02520881, 0.02480357, 0.02770477,
0.03181078, 0.03195106],
[0.02515081, 0.02520881, 0.02480357, 0.02770477, 0.03181078,
0.03195106, 0.03192889],
[0.02520881, 0.02480357, 0.02770477, 0.03181078, 0.03195106,
0.03192889, 0.0315409 ],
[0.02480357, 0.02770477, 0.03181078, 0.03195106, 0.03192889,
0.0315409 , 0.03160804]])
y_val_vola28_7
array([[0.02770477],
[0.03181078],
[0.03195106],
[0.03192889],
[0.0315409 ],
[0.03160804],
[0.03161677]])
X_test_vola28_7
array([[0.02770477, 0.03181078, 0.03195106, 0.03192889, 0.0315409 ,
0.03160804, 0.03161677],
[0.03181078, 0.03195106, 0.03192889, 0.0315409 , 0.03160804,
0.03161677, 0.03195545],
[0.03195106, 0.03192889, 0.0315409 , 0.03160804, 0.03161677,
0.03195545, 0.03205401],
[0.03192889, 0.0315409 , 0.03160804, 0.03161677, 0.03195545,
0.03205401, 0.0316344 ],
[0.0315409 , 0.03160804, 0.03161677, 0.03195545, 0.03205401,
0.0316344 , 0.03230006],
[0.03160804, 0.03161677, 0.03195545, 0.03205401, 0.0316344 ,
0.03230006, 0.03310511],
[0.03161677, 0.03195545, 0.03205401, 0.0316344 , 0.03230006,
0.03310511, 0.03552294]])
y_test_vola28_7
array([[0.03195545],
[0.03205401],
[0.0316344 ],
[0.03230006],
[0.03310511],
[0.03552294],
[0.03624507]])
Horizonte de 14 días (\(\tau=14\))
# Build the train / validation / test splits for the 28-day volatility series
# with a 14-day horizon (tau = 14), then report the shape of every resulting array.
X_train_vola28_14, y_train_vola28_14, X_val_vola28_14, y_val_vola28_14, X_test_vola28_14, y_test_vola28_14 = create_time_series_datasets(df_1_st,'Volatilidad_28',tau14)
# Report dimensions of each split
for _label, _arr in (("X_train", X_train_vola28_14), ("y_train", y_train_vola28_14),
                     ("X_val", X_val_vola28_14), ("y_val", y_val_vola28_14),
                     ("X_test", X_test_vola28_14), ("y_test", y_test_vola28_14)):
    print(f"{_label} shape:", _arr.shape)
X_train shape: (4943, 14)
y_train shape: (4943, 1)
X_val shape: (14, 14)
y_val shape: (14, 1)
X_test shape: (14, 14)
y_test shape: (14, 1)
A continuación se puede visualizar el arreglo para cada una de las matrices generadas para los datos de entrenamiento, validación y prueba para un \(\tau = 14\).
X_train_vola28_14
array([[0. , 0. , 0. , ..., 0. , 0. ,
0. ],
[0. , 0. , 0. , ..., 0. , 0. ,
0. ],
[0. , 0. , 0. , ..., 0. , 0. ,
0. ],
...,
[0.02967051, 0.02970099, 0.0296995 , ..., 0.02316793, 0.0237941 ,
0.0241668 ],
[0.02970099, 0.0296995 , 0.02898439, ..., 0.0237941 , 0.0241668 ,
0.02046467],
[0.0296995 , 0.02898439, 0.02881245, ..., 0.0241668 , 0.02046467,
0.02053574]])
y_train_vola28_14
array([[0. ],
[0. ],
[0. ],
...,
[0.02046467],
[0.02053574],
[0.01971268]])
X_val_vola28_14
array([[0.02898439, 0.02881245, 0.02743928, 0.02688038, 0.02688689,
0.0268874 , 0.02694642, 0.02323871, 0.02316793, 0.0237941 ,
0.0241668 , 0.02046467, 0.02053574, 0.01971268],
[0.02881245, 0.02743928, 0.02688038, 0.02688689, 0.0268874 ,
0.02694642, 0.02323871, 0.02316793, 0.0237941 , 0.0241668 ,
0.02046467, 0.02053574, 0.01971268, 0.02033148],
[0.02743928, 0.02688038, 0.02688689, 0.0268874 , 0.02694642,
0.02323871, 0.02316793, 0.0237941 , 0.0241668 , 0.02046467,
0.02053574, 0.01971268, 0.02033148, 0.02034604],
[0.02688038, 0.02688689, 0.0268874 , 0.02694642, 0.02323871,
0.02316793, 0.0237941 , 0.0241668 , 0.02046467, 0.02053574,
0.01971268, 0.02033148, 0.02034604, 0.02126317],
[0.02688689, 0.0268874 , 0.02694642, 0.02323871, 0.02316793,
0.0237941 , 0.0241668 , 0.02046467, 0.02053574, 0.01971268,
0.02033148, 0.02034604, 0.02126317, 0.01967574],
[0.0268874 , 0.02694642, 0.02323871, 0.02316793, 0.0237941 ,
0.0241668 , 0.02046467, 0.02053574, 0.01971268, 0.02033148,
0.02034604, 0.02126317, 0.01967574, 0.01968943],
[0.02694642, 0.02323871, 0.02316793, 0.0237941 , 0.0241668 ,
0.02046467, 0.02053574, 0.01971268, 0.02033148, 0.02034604,
0.02126317, 0.01967574, 0.01968943, 0.01992829],
[0.02323871, 0.02316793, 0.0237941 , 0.0241668 , 0.02046467,
0.02053574, 0.01971268, 0.02033148, 0.02034604, 0.02126317,
0.01967574, 0.01968943, 0.01992829, 0.01982134],
[0.02316793, 0.0237941 , 0.0241668 , 0.02046467, 0.02053574,
0.01971268, 0.02033148, 0.02034604, 0.02126317, 0.01967574,
0.01968943, 0.01992829, 0.01982134, 0.01667527],
[0.0237941 , 0.0241668 , 0.02046467, 0.02053574, 0.01971268,
0.02033148, 0.02034604, 0.02126317, 0.01967574, 0.01968943,
0.01992829, 0.01982134, 0.01667527, 0.0166734 ],
[0.0241668 , 0.02046467, 0.02053574, 0.01971268, 0.02033148,
0.02034604, 0.02126317, 0.01967574, 0.01968943, 0.01992829,
0.01982134, 0.01667527, 0.0166734 , 0.01698105],
[0.02046467, 0.02053574, 0.01971268, 0.02033148, 0.02034604,
0.02126317, 0.01967574, 0.01968943, 0.01992829, 0.01982134,
0.01667527, 0.0166734 , 0.01698105, 0.01721507],
[0.02053574, 0.01971268, 0.02033148, 0.02034604, 0.02126317,
0.01967574, 0.01968943, 0.01992829, 0.01982134, 0.01667527,
0.0166734 , 0.01698105, 0.01721507, 0.01594814],
[0.01971268, 0.02033148, 0.02034604, 0.02126317, 0.01967574,
0.01968943, 0.01992829, 0.01982134, 0.01667527, 0.0166734 ,
0.01698105, 0.01721507, 0.01594814, 0.01604526]])
y_val_vola28_14
array([[0.02033148],
[0.02034604],
[0.02126317],
[0.01967574],
[0.01968943],
[0.01992829],
[0.01982134],
[0.01667527],
[0.0166734 ],
[0.01698105],
[0.01721507],
[0.01594814],
[0.01604526],
[0.01596223]])
X_test_vola28_14
array([[0.02033148, 0.02034604, 0.02126317, 0.01967574, 0.01968943,
0.01992829, 0.01982134, 0.01667527, 0.0166734 , 0.01698105,
0.01721507, 0.01594814, 0.01604526, 0.01596223],
[0.02034604, 0.02126317, 0.01967574, 0.01968943, 0.01992829,
0.01982134, 0.01667527, 0.0166734 , 0.01698105, 0.01721507,
0.01594814, 0.01604526, 0.01596223, 0.01770891],
[0.02126317, 0.01967574, 0.01968943, 0.01992829, 0.01982134,
0.01667527, 0.0166734 , 0.01698105, 0.01721507, 0.01594814,
0.01604526, 0.01596223, 0.01770891, 0.01883111],
[0.01967574, 0.01968943, 0.01992829, 0.01982134, 0.01667527,
0.0166734 , 0.01698105, 0.01721507, 0.01594814, 0.01604526,
0.01596223, 0.01770891, 0.01883111, 0.0243227 ],
[0.01968943, 0.01992829, 0.01982134, 0.01667527, 0.0166734 ,
0.01698105, 0.01721507, 0.01594814, 0.01604526, 0.01596223,
0.01770891, 0.01883111, 0.0243227 , 0.02519831],
[0.01992829, 0.01982134, 0.01667527, 0.0166734 , 0.01698105,
0.01721507, 0.01594814, 0.01604526, 0.01596223, 0.01770891,
0.01883111, 0.0243227 , 0.02519831, 0.02515081],
[0.01982134, 0.01667527, 0.0166734 , 0.01698105, 0.01721507,
0.01594814, 0.01604526, 0.01596223, 0.01770891, 0.01883111,
0.0243227 , 0.02519831, 0.02515081, 0.02520881],
[0.01667527, 0.0166734 , 0.01698105, 0.01721507, 0.01594814,
0.01604526, 0.01596223, 0.01770891, 0.01883111, 0.0243227 ,
0.02519831, 0.02515081, 0.02520881, 0.02480357],
[0.0166734 , 0.01698105, 0.01721507, 0.01594814, 0.01604526,
0.01596223, 0.01770891, 0.01883111, 0.0243227 , 0.02519831,
0.02515081, 0.02520881, 0.02480357, 0.02770477],
[0.01698105, 0.01721507, 0.01594814, 0.01604526, 0.01596223,
0.01770891, 0.01883111, 0.0243227 , 0.02519831, 0.02515081,
0.02520881, 0.02480357, 0.02770477, 0.03181078],
[0.01721507, 0.01594814, 0.01604526, 0.01596223, 0.01770891,
0.01883111, 0.0243227 , 0.02519831, 0.02515081, 0.02520881,
0.02480357, 0.02770477, 0.03181078, 0.03195106],
[0.01594814, 0.01604526, 0.01596223, 0.01770891, 0.01883111,
0.0243227 , 0.02519831, 0.02515081, 0.02520881, 0.02480357,
0.02770477, 0.03181078, 0.03195106, 0.03192889],
[0.01604526, 0.01596223, 0.01770891, 0.01883111, 0.0243227 ,
0.02519831, 0.02515081, 0.02520881, 0.02480357, 0.02770477,
0.03181078, 0.03195106, 0.03192889, 0.0315409 ],
[0.01596223, 0.01770891, 0.01883111, 0.0243227 , 0.02519831,
0.02515081, 0.02520881, 0.02480357, 0.02770477, 0.03181078,
0.03195106, 0.03192889, 0.0315409 , 0.03160804]])
y_test_vola28_14
array([[0.01770891],
[0.01883111],
[0.0243227 ],
[0.02519831],
[0.02515081],
[0.02520881],
[0.02480357],
[0.02770477],
[0.03181078],
[0.03195106],
[0.03192889],
[0.0315409 ],
[0.03160804],
[0.03161677]])
Horizonte de 21 días (\(\tau=21\))
# Build the train / validation / test splits for the 28-day volatility series
# with a 21-day horizon (tau = 21), then report the shape of every resulting array.
X_train_vola28_21, y_train_vola28_21, X_val_vola28_21, y_val_vola28_21, X_test_vola28_21, y_test_vola28_21 = create_time_series_datasets(df_1_st,'Volatilidad_28',tau21)
# Report dimensions of each split
for _label, _arr in (("X_train", X_train_vola28_21), ("y_train", y_train_vola28_21),
                     ("X_val", X_val_vola28_21), ("y_val", y_val_vola28_21),
                     ("X_test", X_test_vola28_21), ("y_test", y_test_vola28_21)):
    print(f"{_label} shape:", _arr.shape)
X_train shape: (4915, 21)
y_train shape: (4915, 1)
X_val shape: (21, 21)
y_val shape: (21, 1)
X_test shape: (21, 21)
y_test shape: (21, 1)
A continuación se puede visualizar el arreglo para cada una de las matrices generadas para los datos de entrenamiento, validación y prueba para un \(\tau = 21\).
X_train_vola28_21
array([[0. , 0. , 0. , ..., 0. , 0. ,
0. ],
[0. , 0. , 0. , ..., 0. , 0. ,
0. ],
[0. , 0. , 0. , ..., 0. , 0. ,
0. ],
...,
[0.02434324, 0.02412522, 0.02404063, ..., 0.0278551 , 0.02718499,
0.02791418],
[0.02412522, 0.02404063, 0.02366682, ..., 0.02718499, 0.02791418,
0.02797342],
[0.02404063, 0.02366682, 0.02197955, ..., 0.02791418, 0.02797342,
0.02796468]])
y_train_vola28_21
array([[0. ],
[0. ],
[0. ],
...,
[0.02797342],
[0.02796468],
[0.02780442]])
X_val_vola28_21
array([[0.02366682, 0.02197955, 0.02373749, 0.02435859, 0.02405806,
0.02398615, 0.02398642, 0.02455684, 0.02492649, 0.02428771,
0.02435126, 0.02809308, 0.02805948, 0.02820158, 0.02778743,
0.0278551 , 0.02718499, 0.02791418, 0.02797342, 0.02796468,
0.02780442],
[0.02197955, 0.02373749, 0.02435859, 0.02405806, 0.02398615,
0.02398642, 0.02455684, 0.02492649, 0.02428771, 0.02435126,
0.02809308, 0.02805948, 0.02820158, 0.02778743, 0.0278551 ,
0.02718499, 0.02791418, 0.02797342, 0.02796468, 0.02780442,
0.02906891],
[0.02373749, 0.02435859, 0.02405806, 0.02398615, 0.02398642,
0.02455684, 0.02492649, 0.02428771, 0.02435126, 0.02809308,
0.02805948, 0.02820158, 0.02778743, 0.0278551 , 0.02718499,
0.02791418, 0.02797342, 0.02796468, 0.02780442, 0.02906891,
0.02883512],
[0.02435859, 0.02405806, 0.02398615, 0.02398642, 0.02455684,
0.02492649, 0.02428771, 0.02435126, 0.02809308, 0.02805948,
0.02820158, 0.02778743, 0.0278551 , 0.02718499, 0.02791418,
0.02797342, 0.02796468, 0.02780442, 0.02906891, 0.02883512,
0.02849185],
[0.02405806, 0.02398615, 0.02398642, 0.02455684, 0.02492649,
0.02428771, 0.02435126, 0.02809308, 0.02805948, 0.02820158,
0.02778743, 0.0278551 , 0.02718499, 0.02791418, 0.02797342,
0.02796468, 0.02780442, 0.02906891, 0.02883512, 0.02849185,
0.02828838],
[0.02398615, 0.02398642, 0.02455684, 0.02492649, 0.02428771,
0.02435126, 0.02809308, 0.02805948, 0.02820158, 0.02778743,
0.0278551 , 0.02718499, 0.02791418, 0.02797342, 0.02796468,
0.02780442, 0.02906891, 0.02883512, 0.02849185, 0.02828838,
0.02967051],
[0.02398642, 0.02455684, 0.02492649, 0.02428771, 0.02435126,
0.02809308, 0.02805948, 0.02820158, 0.02778743, 0.0278551 ,
0.02718499, 0.02791418, 0.02797342, 0.02796468, 0.02780442,
0.02906891, 0.02883512, 0.02849185, 0.02828838, 0.02967051,
0.02970099],
[0.02455684, 0.02492649, 0.02428771, 0.02435126, 0.02809308,
0.02805948, 0.02820158, 0.02778743, 0.0278551 , 0.02718499,
0.02791418, 0.02797342, 0.02796468, 0.02780442, 0.02906891,
0.02883512, 0.02849185, 0.02828838, 0.02967051, 0.02970099,
0.0296995 ],
[0.02492649, 0.02428771, 0.02435126, 0.02809308, 0.02805948,
0.02820158, 0.02778743, 0.0278551 , 0.02718499, 0.02791418,
0.02797342, 0.02796468, 0.02780442, 0.02906891, 0.02883512,
0.02849185, 0.02828838, 0.02967051, 0.02970099, 0.0296995 ,
0.02898439],
[0.02428771, 0.02435126, 0.02809308, 0.02805948, 0.02820158,
0.02778743, 0.0278551 , 0.02718499, 0.02791418, 0.02797342,
0.02796468, 0.02780442, 0.02906891, 0.02883512, 0.02849185,
0.02828838, 0.02967051, 0.02970099, 0.0296995 , 0.02898439,
0.02881245],
[0.02435126, 0.02809308, 0.02805948, 0.02820158, 0.02778743,
0.0278551 , 0.02718499, 0.02791418, 0.02797342, 0.02796468,
0.02780442, 0.02906891, 0.02883512, 0.02849185, 0.02828838,
0.02967051, 0.02970099, 0.0296995 , 0.02898439, 0.02881245,
0.02743928],
[0.02809308, 0.02805948, 0.02820158, 0.02778743, 0.0278551 ,
0.02718499, 0.02791418, 0.02797342, 0.02796468, 0.02780442,
0.02906891, 0.02883512, 0.02849185, 0.02828838, 0.02967051,
0.02970099, 0.0296995 , 0.02898439, 0.02881245, 0.02743928,
0.02688038],
[0.02805948, 0.02820158, 0.02778743, 0.0278551 , 0.02718499,
0.02791418, 0.02797342, 0.02796468, 0.02780442, 0.02906891,
0.02883512, 0.02849185, 0.02828838, 0.02967051, 0.02970099,
0.0296995 , 0.02898439, 0.02881245, 0.02743928, 0.02688038,
0.02688689],
[0.02820158, 0.02778743, 0.0278551 , 0.02718499, 0.02791418,
0.02797342, 0.02796468, 0.02780442, 0.02906891, 0.02883512,
0.02849185, 0.02828838, 0.02967051, 0.02970099, 0.0296995 ,
0.02898439, 0.02881245, 0.02743928, 0.02688038, 0.02688689,
0.0268874 ],
[0.02778743, 0.0278551 , 0.02718499, 0.02791418, 0.02797342,
0.02796468, 0.02780442, 0.02906891, 0.02883512, 0.02849185,
0.02828838, 0.02967051, 0.02970099, 0.0296995 , 0.02898439,
0.02881245, 0.02743928, 0.02688038, 0.02688689, 0.0268874 ,
0.02694642],
[0.0278551 , 0.02718499, 0.02791418, 0.02797342, 0.02796468,
0.02780442, 0.02906891, 0.02883512, 0.02849185, 0.02828838,
0.02967051, 0.02970099, 0.0296995 , 0.02898439, 0.02881245,
0.02743928, 0.02688038, 0.02688689, 0.0268874 , 0.02694642,
0.02323871],
[0.02718499, 0.02791418, 0.02797342, 0.02796468, 0.02780442,
0.02906891, 0.02883512, 0.02849185, 0.02828838, 0.02967051,
0.02970099, 0.0296995 , 0.02898439, 0.02881245, 0.02743928,
0.02688038, 0.02688689, 0.0268874 , 0.02694642, 0.02323871,
0.02316793],
[0.02791418, 0.02797342, 0.02796468, 0.02780442, 0.02906891,
0.02883512, 0.02849185, 0.02828838, 0.02967051, 0.02970099,
0.0296995 , 0.02898439, 0.02881245, 0.02743928, 0.02688038,
0.02688689, 0.0268874 , 0.02694642, 0.02323871, 0.02316793,
0.0237941 ],
[0.02797342, 0.02796468, 0.02780442, 0.02906891, 0.02883512,
0.02849185, 0.02828838, 0.02967051, 0.02970099, 0.0296995 ,
0.02898439, 0.02881245, 0.02743928, 0.02688038, 0.02688689,
0.0268874 , 0.02694642, 0.02323871, 0.02316793, 0.0237941 ,
0.0241668 ],
[0.02796468, 0.02780442, 0.02906891, 0.02883512, 0.02849185,
0.02828838, 0.02967051, 0.02970099, 0.0296995 , 0.02898439,
0.02881245, 0.02743928, 0.02688038, 0.02688689, 0.0268874 ,
0.02694642, 0.02323871, 0.02316793, 0.0237941 , 0.0241668 ,
0.02046467],
[0.02780442, 0.02906891, 0.02883512, 0.02849185, 0.02828838,
0.02967051, 0.02970099, 0.0296995 , 0.02898439, 0.02881245,
0.02743928, 0.02688038, 0.02688689, 0.0268874 , 0.02694642,
0.02323871, 0.02316793, 0.0237941 , 0.0241668 , 0.02046467,
0.02053574]])
y_val_vola28_21
array([[0.02906891],
[0.02883512],
[0.02849185],
[0.02828838],
[0.02967051],
[0.02970099],
[0.0296995 ],
[0.02898439],
[0.02881245],
[0.02743928],
[0.02688038],
[0.02688689],
[0.0268874 ],
[0.02694642],
[0.02323871],
[0.02316793],
[0.0237941 ],
[0.0241668 ],
[0.02046467],
[0.02053574],
[0.01971268]])
X_test_vola28_21
array([[0.02906891, 0.02883512, 0.02849185, 0.02828838, 0.02967051,
0.02970099, 0.0296995 , 0.02898439, 0.02881245, 0.02743928,
0.02688038, 0.02688689, 0.0268874 , 0.02694642, 0.02323871,
0.02316793, 0.0237941 , 0.0241668 , 0.02046467, 0.02053574,
0.01971268],
[0.02883512, 0.02849185, 0.02828838, 0.02967051, 0.02970099,
0.0296995 , 0.02898439, 0.02881245, 0.02743928, 0.02688038,
0.02688689, 0.0268874 , 0.02694642, 0.02323871, 0.02316793,
0.0237941 , 0.0241668 , 0.02046467, 0.02053574, 0.01971268,
0.02033148],
[0.02849185, 0.02828838, 0.02967051, 0.02970099, 0.0296995 ,
0.02898439, 0.02881245, 0.02743928, 0.02688038, 0.02688689,
0.0268874 , 0.02694642, 0.02323871, 0.02316793, 0.0237941 ,
0.0241668 , 0.02046467, 0.02053574, 0.01971268, 0.02033148,
0.02034604],
[0.02828838, 0.02967051, 0.02970099, 0.0296995 , 0.02898439,
0.02881245, 0.02743928, 0.02688038, 0.02688689, 0.0268874 ,
0.02694642, 0.02323871, 0.02316793, 0.0237941 , 0.0241668 ,
0.02046467, 0.02053574, 0.01971268, 0.02033148, 0.02034604,
0.02126317],
[0.02967051, 0.02970099, 0.0296995 , 0.02898439, 0.02881245,
0.02743928, 0.02688038, 0.02688689, 0.0268874 , 0.02694642,
0.02323871, 0.02316793, 0.0237941 , 0.0241668 , 0.02046467,
0.02053574, 0.01971268, 0.02033148, 0.02034604, 0.02126317,
0.01967574],
[0.02970099, 0.0296995 , 0.02898439, 0.02881245, 0.02743928,
0.02688038, 0.02688689, 0.0268874 , 0.02694642, 0.02323871,
0.02316793, 0.0237941 , 0.0241668 , 0.02046467, 0.02053574,
0.01971268, 0.02033148, 0.02034604, 0.02126317, 0.01967574,
0.01968943],
[0.0296995 , 0.02898439, 0.02881245, 0.02743928, 0.02688038,
0.02688689, 0.0268874 , 0.02694642, 0.02323871, 0.02316793,
0.0237941 , 0.0241668 , 0.02046467, 0.02053574, 0.01971268,
0.02033148, 0.02034604, 0.02126317, 0.01967574, 0.01968943,
0.01992829],
[0.02898439, 0.02881245, 0.02743928, 0.02688038, 0.02688689,
0.0268874 , 0.02694642, 0.02323871, 0.02316793, 0.0237941 ,
0.0241668 , 0.02046467, 0.02053574, 0.01971268, 0.02033148,
0.02034604, 0.02126317, 0.01967574, 0.01968943, 0.01992829,
0.01982134],
[0.02881245, 0.02743928, 0.02688038, 0.02688689, 0.0268874 ,
0.02694642, 0.02323871, 0.02316793, 0.0237941 , 0.0241668 ,
0.02046467, 0.02053574, 0.01971268, 0.02033148, 0.02034604,
0.02126317, 0.01967574, 0.01968943, 0.01992829, 0.01982134,
0.01667527],
[0.02743928, 0.02688038, 0.02688689, 0.0268874 , 0.02694642,
0.02323871, 0.02316793, 0.0237941 , 0.0241668 , 0.02046467,
0.02053574, 0.01971268, 0.02033148, 0.02034604, 0.02126317,
0.01967574, 0.01968943, 0.01992829, 0.01982134, 0.01667527,
0.0166734 ],
[0.02688038, 0.02688689, 0.0268874 , 0.02694642, 0.02323871,
0.02316793, 0.0237941 , 0.0241668 , 0.02046467, 0.02053574,
0.01971268, 0.02033148, 0.02034604, 0.02126317, 0.01967574,
0.01968943, 0.01992829, 0.01982134, 0.01667527, 0.0166734 ,
0.01698105],
[0.02688689, 0.0268874 , 0.02694642, 0.02323871, 0.02316793,
0.0237941 , 0.0241668 , 0.02046467, 0.02053574, 0.01971268,
0.02033148, 0.02034604, 0.02126317, 0.01967574, 0.01968943,
0.01992829, 0.01982134, 0.01667527, 0.0166734 , 0.01698105,
0.01721507],
[0.0268874 , 0.02694642, 0.02323871, 0.02316793, 0.0237941 ,
0.0241668 , 0.02046467, 0.02053574, 0.01971268, 0.02033148,
0.02034604, 0.02126317, 0.01967574, 0.01968943, 0.01992829,
0.01982134, 0.01667527, 0.0166734 , 0.01698105, 0.01721507,
0.01594814],
[0.02694642, 0.02323871, 0.02316793, 0.0237941 , 0.0241668 ,
0.02046467, 0.02053574, 0.01971268, 0.02033148, 0.02034604,
0.02126317, 0.01967574, 0.01968943, 0.01992829, 0.01982134,
0.01667527, 0.0166734 , 0.01698105, 0.01721507, 0.01594814,
0.01604526],
[0.02323871, 0.02316793, 0.0237941 , 0.0241668 , 0.02046467,
0.02053574, 0.01971268, 0.02033148, 0.02034604, 0.02126317,
0.01967574, 0.01968943, 0.01992829, 0.01982134, 0.01667527,
0.0166734 , 0.01698105, 0.01721507, 0.01594814, 0.01604526,
0.01596223],
[0.02316793, 0.0237941 , 0.0241668 , 0.02046467, 0.02053574,
0.01971268, 0.02033148, 0.02034604, 0.02126317, 0.01967574,
0.01968943, 0.01992829, 0.01982134, 0.01667527, 0.0166734 ,
0.01698105, 0.01721507, 0.01594814, 0.01604526, 0.01596223,
0.01770891],
[0.0237941 , 0.0241668 , 0.02046467, 0.02053574, 0.01971268,
0.02033148, 0.02034604, 0.02126317, 0.01967574, 0.01968943,
0.01992829, 0.01982134, 0.01667527, 0.0166734 , 0.01698105,
0.01721507, 0.01594814, 0.01604526, 0.01596223, 0.01770891,
0.01883111],
[0.0241668 , 0.02046467, 0.02053574, 0.01971268, 0.02033148,
0.02034604, 0.02126317, 0.01967574, 0.01968943, 0.01992829,
0.01982134, 0.01667527, 0.0166734 , 0.01698105, 0.01721507,
0.01594814, 0.01604526, 0.01596223, 0.01770891, 0.01883111,
0.0243227 ],
[0.02046467, 0.02053574, 0.01971268, 0.02033148, 0.02034604,
0.02126317, 0.01967574, 0.01968943, 0.01992829, 0.01982134,
0.01667527, 0.0166734 , 0.01698105, 0.01721507, 0.01594814,
0.01604526, 0.01596223, 0.01770891, 0.01883111, 0.0243227 ,
0.02519831],
[0.02053574, 0.01971268, 0.02033148, 0.02034604, 0.02126317,
0.01967574, 0.01968943, 0.01992829, 0.01982134, 0.01667527,
0.0166734 , 0.01698105, 0.01721507, 0.01594814, 0.01604526,
0.01596223, 0.01770891, 0.01883111, 0.0243227 , 0.02519831,
0.02515081],
[0.01971268, 0.02033148, 0.02034604, 0.02126317, 0.01967574,
0.01968943, 0.01992829, 0.01982134, 0.01667527, 0.0166734 ,
0.01698105, 0.01721507, 0.01594814, 0.01604526, 0.01596223,
0.01770891, 0.01883111, 0.0243227 , 0.02519831, 0.02515081,
0.02520881]])
y_test_vola28_21
array([[0.02033148],
[0.02034604],
[0.02126317],
[0.01967574],
[0.01968943],
[0.01992829],
[0.01982134],
[0.01667527],
[0.0166734 ],
[0.01698105],
[0.01721507],
[0.01594814],
[0.01604526],
[0.01596223],
[0.01770891],
[0.01883111],
[0.0243227 ],
[0.02519831],
[0.02515081],
[0.02520881],
[0.02480357]])
Horizonte de 28 días (\(\tau=28\))
# Build the train / validation / test splits for the 28-day volatility series
# with a 28-day horizon (tau = 28), then report the shape of every resulting array.
X_train_vola28_28, y_train_vola28_28, X_val_vola28_28, y_val_vola28_28, X_test_vola28_28, y_test_vola28_28 = create_time_series_datasets(df_1_st,'Volatilidad_28',tau28)
# Print the dimensions.
# BUG FIX: the validation prints previously referenced X_val_vola21_28 / y_val_vola21_28
# (copy-pasted from the omega=21 section), so the reported validation shapes came from
# the wrong dataset; they must reference this section's *_vola28_28 arrays.
print("X_train shape:", X_train_vola28_28.shape)
print("y_train shape:", y_train_vola28_28.shape)
print("X_val shape:", X_val_vola28_28.shape)
print("y_val shape:", y_val_vola28_28.shape)
print("X_test shape:", X_test_vola28_28.shape)
print("y_test shape:", y_test_vola28_28.shape)
X_train shape: (4887, 28)
y_train shape: (4887, 1)
X_val shape: (28, 28)
y_val shape: (28, 1)
X_test shape: (28, 28)
y_test shape: (28, 1)
A continuación se puede visualizar el arreglo para cada una de las matrices generadas para los datos de entrenamiento, validación y prueba para un \(\tau = 28\).
X_train_vola28_28
array([[0. , 0. , 0. , ..., 0. , 0. ,
0. ],
[0. , 0. , 0. , ..., 0. , 0. ,
0. ],
[0. , 0. , 0. , ..., 0. , 0. ,
0. ],
...,
[0.02267613, 0.02284795, 0.0228733 , ..., 0.02388862, 0.02410246,
0.02451506],
[0.02284795, 0.0228733 , 0.0244102 , ..., 0.02410246, 0.02451506,
0.02434324],
[0.0228733 , 0.0244102 , 0.02572188, ..., 0.02451506, 0.02434324,
0.02412522]])
y_train_vola28_28
array([[0. ],
[0. ],
[0. ],
...,
[0.02434324],
[0.02412522],
[0.02404063]])
X_val_vola28_28
array([[0.0244102 , 0.02572188, 0.02587475, 0.02573349, 0.02580405,
0.02592489, 0.02588763, 0.02832466, 0.027697 , 0.02575233,
0.02382231, 0.02454451, 0.024523 , 0.02482233, 0.02534815,
0.02367498, 0.02308133, 0.02301646, 0.02300261, 0.02309728,
0.02332277, 0.0232827 , 0.02388862, 0.02410246, 0.02451506,
0.02434324, 0.02412522, 0.02404063],
[0.02572188, 0.02587475, 0.02573349, 0.02580405, 0.02592489,
0.02588763, 0.02832466, 0.027697 , 0.02575233, 0.02382231,
0.02454451, 0.024523 , 0.02482233, 0.02534815, 0.02367498,
0.02308133, 0.02301646, 0.02300261, 0.02309728, 0.02332277,
0.0232827 , 0.02388862, 0.02410246, 0.02451506, 0.02434324,
0.02412522, 0.02404063, 0.02366682],
[0.02587475, 0.02573349, 0.02580405, 0.02592489, 0.02588763,
0.02832466, 0.027697 , 0.02575233, 0.02382231, 0.02454451,
0.024523 , 0.02482233, 0.02534815, 0.02367498, 0.02308133,
0.02301646, 0.02300261, 0.02309728, 0.02332277, 0.0232827 ,
0.02388862, 0.02410246, 0.02451506, 0.02434324, 0.02412522,
0.02404063, 0.02366682, 0.02197955],
[0.02573349, 0.02580405, 0.02592489, 0.02588763, 0.02832466,
0.027697 , 0.02575233, 0.02382231, 0.02454451, 0.024523 ,
0.02482233, 0.02534815, 0.02367498, 0.02308133, 0.02301646,
0.02300261, 0.02309728, 0.02332277, 0.0232827 , 0.02388862,
0.02410246, 0.02451506, 0.02434324, 0.02412522, 0.02404063,
0.02366682, 0.02197955, 0.02373749],
[0.02580405, 0.02592489, 0.02588763, 0.02832466, 0.027697 ,
0.02575233, 0.02382231, 0.02454451, 0.024523 , 0.02482233,
0.02534815, 0.02367498, 0.02308133, 0.02301646, 0.02300261,
0.02309728, 0.02332277, 0.0232827 , 0.02388862, 0.02410246,
0.02451506, 0.02434324, 0.02412522, 0.02404063, 0.02366682,
0.02197955, 0.02373749, 0.02435859],
[0.02592489, 0.02588763, 0.02832466, 0.027697 , 0.02575233,
0.02382231, 0.02454451, 0.024523 , 0.02482233, 0.02534815,
0.02367498, 0.02308133, 0.02301646, 0.02300261, 0.02309728,
0.02332277, 0.0232827 , 0.02388862, 0.02410246, 0.02451506,
0.02434324, 0.02412522, 0.02404063, 0.02366682, 0.02197955,
0.02373749, 0.02435859, 0.02405806],
[0.02588763, 0.02832466, 0.027697 , 0.02575233, 0.02382231,
0.02454451, 0.024523 , 0.02482233, 0.02534815, 0.02367498,
0.02308133, 0.02301646, 0.02300261, 0.02309728, 0.02332277,
0.0232827 , 0.02388862, 0.02410246, 0.02451506, 0.02434324,
0.02412522, 0.02404063, 0.02366682, 0.02197955, 0.02373749,
0.02435859, 0.02405806, 0.02398615],
[0.02832466, 0.027697 , 0.02575233, 0.02382231, 0.02454451,
0.024523 , 0.02482233, 0.02534815, 0.02367498, 0.02308133,
0.02301646, 0.02300261, 0.02309728, 0.02332277, 0.0232827 ,
0.02388862, 0.02410246, 0.02451506, 0.02434324, 0.02412522,
0.02404063, 0.02366682, 0.02197955, 0.02373749, 0.02435859,
0.02405806, 0.02398615, 0.02398642],
[0.027697 , 0.02575233, 0.02382231, 0.02454451, 0.024523 ,
0.02482233, 0.02534815, 0.02367498, 0.02308133, 0.02301646,
0.02300261, 0.02309728, 0.02332277, 0.0232827 , 0.02388862,
0.02410246, 0.02451506, 0.02434324, 0.02412522, 0.02404063,
0.02366682, 0.02197955, 0.02373749, 0.02435859, 0.02405806,
0.02398615, 0.02398642, 0.02455684],
[0.02575233, 0.02382231, 0.02454451, 0.024523 , 0.02482233,
0.02534815, 0.02367498, 0.02308133, 0.02301646, 0.02300261,
0.02309728, 0.02332277, 0.0232827 , 0.02388862, 0.02410246,
0.02451506, 0.02434324, 0.02412522, 0.02404063, 0.02366682,
0.02197955, 0.02373749, 0.02435859, 0.02405806, 0.02398615,
0.02398642, 0.02455684, 0.02492649],
[0.02382231, 0.02454451, 0.024523 , 0.02482233, 0.02534815,
0.02367498, 0.02308133, 0.02301646, 0.02300261, 0.02309728,
0.02332277, 0.0232827 , 0.02388862, 0.02410246, 0.02451506,
0.02434324, 0.02412522, 0.02404063, 0.02366682, 0.02197955,
0.02373749, 0.02435859, 0.02405806, 0.02398615, 0.02398642,
0.02455684, 0.02492649, 0.02428771],
[0.02454451, 0.024523 , 0.02482233, 0.02534815, 0.02367498,
0.02308133, 0.02301646, 0.02300261, 0.02309728, 0.02332277,
0.0232827 , 0.02388862, 0.02410246, 0.02451506, 0.02434324,
0.02412522, 0.02404063, 0.02366682, 0.02197955, 0.02373749,
0.02435859, 0.02405806, 0.02398615, 0.02398642, 0.02455684,
0.02492649, 0.02428771, 0.02435126],
[0.024523 , 0.02482233, 0.02534815, 0.02367498, 0.02308133,
0.02301646, 0.02300261, 0.02309728, 0.02332277, 0.0232827 ,
0.02388862, 0.02410246, 0.02451506, 0.02434324, 0.02412522,
0.02404063, 0.02366682, 0.02197955, 0.02373749, 0.02435859,
0.02405806, 0.02398615, 0.02398642, 0.02455684, 0.02492649,
0.02428771, 0.02435126, 0.02809308],
[0.02482233, 0.02534815, 0.02367498, 0.02308133, 0.02301646,
0.02300261, 0.02309728, 0.02332277, 0.0232827 , 0.02388862,
0.02410246, 0.02451506, 0.02434324, 0.02412522, 0.02404063,
0.02366682, 0.02197955, 0.02373749, 0.02435859, 0.02405806,
0.02398615, 0.02398642, 0.02455684, 0.02492649, 0.02428771,
0.02435126, 0.02809308, 0.02805948],
[0.02534815, 0.02367498, 0.02308133, 0.02301646, 0.02300261,
0.02309728, 0.02332277, 0.0232827 , 0.02388862, 0.02410246,
0.02451506, 0.02434324, 0.02412522, 0.02404063, 0.02366682,
0.02197955, 0.02373749, 0.02435859, 0.02405806, 0.02398615,
0.02398642, 0.02455684, 0.02492649, 0.02428771, 0.02435126,
0.02809308, 0.02805948, 0.02820158],
[0.02367498, 0.02308133, 0.02301646, 0.02300261, 0.02309728,
0.02332277, 0.0232827 , 0.02388862, 0.02410246, 0.02451506,
0.02434324, 0.02412522, 0.02404063, 0.02366682, 0.02197955,
0.02373749, 0.02435859, 0.02405806, 0.02398615, 0.02398642,
0.02455684, 0.02492649, 0.02428771, 0.02435126, 0.02809308,
0.02805948, 0.02820158, 0.02778743],
[0.02308133, 0.02301646, 0.02300261, 0.02309728, 0.02332277,
0.0232827 , 0.02388862, 0.02410246, 0.02451506, 0.02434324,
0.02412522, 0.02404063, 0.02366682, 0.02197955, 0.02373749,
0.02435859, 0.02405806, 0.02398615, 0.02398642, 0.02455684,
0.02492649, 0.02428771, 0.02435126, 0.02809308, 0.02805948,
0.02820158, 0.02778743, 0.0278551 ],
[0.02301646, 0.02300261, 0.02309728, 0.02332277, 0.0232827 ,
0.02388862, 0.02410246, 0.02451506, 0.02434324, 0.02412522,
0.02404063, 0.02366682, 0.02197955, 0.02373749, 0.02435859,
0.02405806, 0.02398615, 0.02398642, 0.02455684, 0.02492649,
0.02428771, 0.02435126, 0.02809308, 0.02805948, 0.02820158,
0.02778743, 0.0278551 , 0.02718499],
[0.02300261, 0.02309728, 0.02332277, 0.0232827 , 0.02388862,
0.02410246, 0.02451506, 0.02434324, 0.02412522, 0.02404063,
0.02366682, 0.02197955, 0.02373749, 0.02435859, 0.02405806,
0.02398615, 0.02398642, 0.02455684, 0.02492649, 0.02428771,
0.02435126, 0.02809308, 0.02805948, 0.02820158, 0.02778743,
0.0278551 , 0.02718499, 0.02791418],
[0.02309728, 0.02332277, 0.0232827 , 0.02388862, 0.02410246,
0.02451506, 0.02434324, 0.02412522, 0.02404063, 0.02366682,
0.02197955, 0.02373749, 0.02435859, 0.02405806, 0.02398615,
0.02398642, 0.02455684, 0.02492649, 0.02428771, 0.02435126,
0.02809308, 0.02805948, 0.02820158, 0.02778743, 0.0278551 ,
0.02718499, 0.02791418, 0.02797342],
[0.02332277, 0.0232827 , 0.02388862, 0.02410246, 0.02451506,
0.02434324, 0.02412522, 0.02404063, 0.02366682, 0.02197955,
0.02373749, 0.02435859, 0.02405806, 0.02398615, 0.02398642,
0.02455684, 0.02492649, 0.02428771, 0.02435126, 0.02809308,
0.02805948, 0.02820158, 0.02778743, 0.0278551 , 0.02718499,
0.02791418, 0.02797342, 0.02796468],
[0.0232827 , 0.02388862, 0.02410246, 0.02451506, 0.02434324,
0.02412522, 0.02404063, 0.02366682, 0.02197955, 0.02373749,
0.02435859, 0.02405806, 0.02398615, 0.02398642, 0.02455684,
0.02492649, 0.02428771, 0.02435126, 0.02809308, 0.02805948,
0.02820158, 0.02778743, 0.0278551 , 0.02718499, 0.02791418,
0.02797342, 0.02796468, 0.02780442],
[0.02388862, 0.02410246, 0.02451506, 0.02434324, 0.02412522,
0.02404063, 0.02366682, 0.02197955, 0.02373749, 0.02435859,
0.02405806, 0.02398615, 0.02398642, 0.02455684, 0.02492649,
0.02428771, 0.02435126, 0.02809308, 0.02805948, 0.02820158,
0.02778743, 0.0278551 , 0.02718499, 0.02791418, 0.02797342,
0.02796468, 0.02780442, 0.02906891],
[0.02410246, 0.02451506, 0.02434324, 0.02412522, 0.02404063,
0.02366682, 0.02197955, 0.02373749, 0.02435859, 0.02405806,
0.02398615, 0.02398642, 0.02455684, 0.02492649, 0.02428771,
0.02435126, 0.02809308, 0.02805948, 0.02820158, 0.02778743,
0.0278551 , 0.02718499, 0.02791418, 0.02797342, 0.02796468,
0.02780442, 0.02906891, 0.02883512],
[0.02451506, 0.02434324, 0.02412522, 0.02404063, 0.02366682,
0.02197955, 0.02373749, 0.02435859, 0.02405806, 0.02398615,
0.02398642, 0.02455684, 0.02492649, 0.02428771, 0.02435126,
0.02809308, 0.02805948, 0.02820158, 0.02778743, 0.0278551 ,
0.02718499, 0.02791418, 0.02797342, 0.02796468, 0.02780442,
0.02906891, 0.02883512, 0.02849185],
[0.02434324, 0.02412522, 0.02404063, 0.02366682, 0.02197955,
0.02373749, 0.02435859, 0.02405806, 0.02398615, 0.02398642,
0.02455684, 0.02492649, 0.02428771, 0.02435126, 0.02809308,
0.02805948, 0.02820158, 0.02778743, 0.0278551 , 0.02718499,
0.02791418, 0.02797342, 0.02796468, 0.02780442, 0.02906891,
0.02883512, 0.02849185, 0.02828838],
[0.02412522, 0.02404063, 0.02366682, 0.02197955, 0.02373749,
0.02435859, 0.02405806, 0.02398615, 0.02398642, 0.02455684,
0.02492649, 0.02428771, 0.02435126, 0.02809308, 0.02805948,
0.02820158, 0.02778743, 0.0278551 , 0.02718499, 0.02791418,
0.02797342, 0.02796468, 0.02780442, 0.02906891, 0.02883512,
0.02849185, 0.02828838, 0.02967051],
[0.02404063, 0.02366682, 0.02197955, 0.02373749, 0.02435859,
0.02405806, 0.02398615, 0.02398642, 0.02455684, 0.02492649,
0.02428771, 0.02435126, 0.02809308, 0.02805948, 0.02820158,
0.02778743, 0.0278551 , 0.02718499, 0.02791418, 0.02797342,
0.02796468, 0.02780442, 0.02906891, 0.02883512, 0.02849185,
0.02828838, 0.02967051, 0.02970099]])
y_val_vola28_28
array([[0.02366682],
[0.02197955],
[0.02373749],
[0.02435859],
[0.02405806],
[0.02398615],
[0.02398642],
[0.02455684],
[0.02492649],
[0.02428771],
[0.02435126],
[0.02809308],
[0.02805948],
[0.02820158],
[0.02778743],
[0.0278551 ],
[0.02718499],
[0.02791418],
[0.02797342],
[0.02796468],
[0.02780442],
[0.02906891],
[0.02883512],
[0.02849185],
[0.02828838],
[0.02967051],
[0.02970099],
[0.0296995 ]])
X_test_vola28_28
array([[0.02366682, 0.02197955, 0.02373749, 0.02435859, 0.02405806,
0.02398615, 0.02398642, 0.02455684, 0.02492649, 0.02428771,
0.02435126, 0.02809308, 0.02805948, 0.02820158, 0.02778743,
0.0278551 , 0.02718499, 0.02791418, 0.02797342, 0.02796468,
0.02780442, 0.02906891, 0.02883512, 0.02849185, 0.02828838,
0.02967051, 0.02970099, 0.0296995 ],
[0.02197955, 0.02373749, 0.02435859, 0.02405806, 0.02398615,
0.02398642, 0.02455684, 0.02492649, 0.02428771, 0.02435126,
0.02809308, 0.02805948, 0.02820158, 0.02778743, 0.0278551 ,
0.02718499, 0.02791418, 0.02797342, 0.02796468, 0.02780442,
0.02906891, 0.02883512, 0.02849185, 0.02828838, 0.02967051,
0.02970099, 0.0296995 , 0.02898439],
[0.02373749, 0.02435859, 0.02405806, 0.02398615, 0.02398642,
0.02455684, 0.02492649, 0.02428771, 0.02435126, 0.02809308,
0.02805948, 0.02820158, 0.02778743, 0.0278551 , 0.02718499,
0.02791418, 0.02797342, 0.02796468, 0.02780442, 0.02906891,
0.02883512, 0.02849185, 0.02828838, 0.02967051, 0.02970099,
0.0296995 , 0.02898439, 0.02881245],
[0.02435859, 0.02405806, 0.02398615, 0.02398642, 0.02455684,
0.02492649, 0.02428771, 0.02435126, 0.02809308, 0.02805948,
0.02820158, 0.02778743, 0.0278551 , 0.02718499, 0.02791418,
0.02797342, 0.02796468, 0.02780442, 0.02906891, 0.02883512,
0.02849185, 0.02828838, 0.02967051, 0.02970099, 0.0296995 ,
0.02898439, 0.02881245, 0.02743928],
[0.02405806, 0.02398615, 0.02398642, 0.02455684, 0.02492649,
0.02428771, 0.02435126, 0.02809308, 0.02805948, 0.02820158,
0.02778743, 0.0278551 , 0.02718499, 0.02791418, 0.02797342,
0.02796468, 0.02780442, 0.02906891, 0.02883512, 0.02849185,
0.02828838, 0.02967051, 0.02970099, 0.0296995 , 0.02898439,
0.02881245, 0.02743928, 0.02688038],
[0.02398615, 0.02398642, 0.02455684, 0.02492649, 0.02428771,
0.02435126, 0.02809308, 0.02805948, 0.02820158, 0.02778743,
0.0278551 , 0.02718499, 0.02791418, 0.02797342, 0.02796468,
0.02780442, 0.02906891, 0.02883512, 0.02849185, 0.02828838,
0.02967051, 0.02970099, 0.0296995 , 0.02898439, 0.02881245,
0.02743928, 0.02688038, 0.02688689],
[0.02398642, 0.02455684, 0.02492649, 0.02428771, 0.02435126,
0.02809308, 0.02805948, 0.02820158, 0.02778743, 0.0278551 ,
0.02718499, 0.02791418, 0.02797342, 0.02796468, 0.02780442,
0.02906891, 0.02883512, 0.02849185, 0.02828838, 0.02967051,
0.02970099, 0.0296995 , 0.02898439, 0.02881245, 0.02743928,
0.02688038, 0.02688689, 0.0268874 ],
[0.02455684, 0.02492649, 0.02428771, 0.02435126, 0.02809308,
0.02805948, 0.02820158, 0.02778743, 0.0278551 , 0.02718499,
0.02791418, 0.02797342, 0.02796468, 0.02780442, 0.02906891,
0.02883512, 0.02849185, 0.02828838, 0.02967051, 0.02970099,
0.0296995 , 0.02898439, 0.02881245, 0.02743928, 0.02688038,
0.02688689, 0.0268874 , 0.02694642],
[0.02492649, 0.02428771, 0.02435126, 0.02809308, 0.02805948,
0.02820158, 0.02778743, 0.0278551 , 0.02718499, 0.02791418,
0.02797342, 0.02796468, 0.02780442, 0.02906891, 0.02883512,
0.02849185, 0.02828838, 0.02967051, 0.02970099, 0.0296995 ,
0.02898439, 0.02881245, 0.02743928, 0.02688038, 0.02688689,
0.0268874 , 0.02694642, 0.02323871],
[0.02428771, 0.02435126, 0.02809308, 0.02805948, 0.02820158,
0.02778743, 0.0278551 , 0.02718499, 0.02791418, 0.02797342,
0.02796468, 0.02780442, 0.02906891, 0.02883512, 0.02849185,
0.02828838, 0.02967051, 0.02970099, 0.0296995 , 0.02898439,
0.02881245, 0.02743928, 0.02688038, 0.02688689, 0.0268874 ,
0.02694642, 0.02323871, 0.02316793],
[0.02435126, 0.02809308, 0.02805948, 0.02820158, 0.02778743,
0.0278551 , 0.02718499, 0.02791418, 0.02797342, 0.02796468,
0.02780442, 0.02906891, 0.02883512, 0.02849185, 0.02828838,
0.02967051, 0.02970099, 0.0296995 , 0.02898439, 0.02881245,
0.02743928, 0.02688038, 0.02688689, 0.0268874 , 0.02694642,
0.02323871, 0.02316793, 0.0237941 ],
[0.02809308, 0.02805948, 0.02820158, 0.02778743, 0.0278551 ,
0.02718499, 0.02791418, 0.02797342, 0.02796468, 0.02780442,
0.02906891, 0.02883512, 0.02849185, 0.02828838, 0.02967051,
0.02970099, 0.0296995 , 0.02898439, 0.02881245, 0.02743928,
0.02688038, 0.02688689, 0.0268874 , 0.02694642, 0.02323871,
0.02316793, 0.0237941 , 0.0241668 ],
[0.02805948, 0.02820158, 0.02778743, 0.0278551 , 0.02718499,
0.02791418, 0.02797342, 0.02796468, 0.02780442, 0.02906891,
0.02883512, 0.02849185, 0.02828838, 0.02967051, 0.02970099,
0.0296995 , 0.02898439, 0.02881245, 0.02743928, 0.02688038,
0.02688689, 0.0268874 , 0.02694642, 0.02323871, 0.02316793,
0.0237941 , 0.0241668 , 0.02046467],
[0.02820158, 0.02778743, 0.0278551 , 0.02718499, 0.02791418,
0.02797342, 0.02796468, 0.02780442, 0.02906891, 0.02883512,
0.02849185, 0.02828838, 0.02967051, 0.02970099, 0.0296995 ,
0.02898439, 0.02881245, 0.02743928, 0.02688038, 0.02688689,
0.0268874 , 0.02694642, 0.02323871, 0.02316793, 0.0237941 ,
0.0241668 , 0.02046467, 0.02053574],
[0.02778743, 0.0278551 , 0.02718499, 0.02791418, 0.02797342,
0.02796468, 0.02780442, 0.02906891, 0.02883512, 0.02849185,
0.02828838, 0.02967051, 0.02970099, 0.0296995 , 0.02898439,
0.02881245, 0.02743928, 0.02688038, 0.02688689, 0.0268874 ,
0.02694642, 0.02323871, 0.02316793, 0.0237941 , 0.0241668 ,
0.02046467, 0.02053574, 0.01971268],
[0.0278551 , 0.02718499, 0.02791418, 0.02797342, 0.02796468,
0.02780442, 0.02906891, 0.02883512, 0.02849185, 0.02828838,
0.02967051, 0.02970099, 0.0296995 , 0.02898439, 0.02881245,
0.02743928, 0.02688038, 0.02688689, 0.0268874 , 0.02694642,
0.02323871, 0.02316793, 0.0237941 , 0.0241668 , 0.02046467,
0.02053574, 0.01971268, 0.02033148],
[0.02718499, 0.02791418, 0.02797342, 0.02796468, 0.02780442,
0.02906891, 0.02883512, 0.02849185, 0.02828838, 0.02967051,
0.02970099, 0.0296995 , 0.02898439, 0.02881245, 0.02743928,
0.02688038, 0.02688689, 0.0268874 , 0.02694642, 0.02323871,
0.02316793, 0.0237941 , 0.0241668 , 0.02046467, 0.02053574,
0.01971268, 0.02033148, 0.02034604],
[0.02791418, 0.02797342, 0.02796468, 0.02780442, 0.02906891,
0.02883512, 0.02849185, 0.02828838, 0.02967051, 0.02970099,
0.0296995 , 0.02898439, 0.02881245, 0.02743928, 0.02688038,
0.02688689, 0.0268874 , 0.02694642, 0.02323871, 0.02316793,
0.0237941 , 0.0241668 , 0.02046467, 0.02053574, 0.01971268,
0.02033148, 0.02034604, 0.02126317],
[0.02797342, 0.02796468, 0.02780442, 0.02906891, 0.02883512,
0.02849185, 0.02828838, 0.02967051, 0.02970099, 0.0296995 ,
0.02898439, 0.02881245, 0.02743928, 0.02688038, 0.02688689,
0.0268874 , 0.02694642, 0.02323871, 0.02316793, 0.0237941 ,
0.0241668 , 0.02046467, 0.02053574, 0.01971268, 0.02033148,
0.02034604, 0.02126317, 0.01967574],
[0.02796468, 0.02780442, 0.02906891, 0.02883512, 0.02849185,
0.02828838, 0.02967051, 0.02970099, 0.0296995 , 0.02898439,
0.02881245, 0.02743928, 0.02688038, 0.02688689, 0.0268874 ,
0.02694642, 0.02323871, 0.02316793, 0.0237941 , 0.0241668 ,
0.02046467, 0.02053574, 0.01971268, 0.02033148, 0.02034604,
0.02126317, 0.01967574, 0.01968943],
[0.02780442, 0.02906891, 0.02883512, 0.02849185, 0.02828838,
0.02967051, 0.02970099, 0.0296995 , 0.02898439, 0.02881245,
0.02743928, 0.02688038, 0.02688689, 0.0268874 , 0.02694642,
0.02323871, 0.02316793, 0.0237941 , 0.0241668 , 0.02046467,
0.02053574, 0.01971268, 0.02033148, 0.02034604, 0.02126317,
0.01967574, 0.01968943, 0.01992829],
[0.02906891, 0.02883512, 0.02849185, 0.02828838, 0.02967051,
0.02970099, 0.0296995 , 0.02898439, 0.02881245, 0.02743928,
0.02688038, 0.02688689, 0.0268874 , 0.02694642, 0.02323871,
0.02316793, 0.0237941 , 0.0241668 , 0.02046467, 0.02053574,
0.01971268, 0.02033148, 0.02034604, 0.02126317, 0.01967574,
0.01968943, 0.01992829, 0.01982134],
[0.02883512, 0.02849185, 0.02828838, 0.02967051, 0.02970099,
0.0296995 , 0.02898439, 0.02881245, 0.02743928, 0.02688038,
0.02688689, 0.0268874 , 0.02694642, 0.02323871, 0.02316793,
0.0237941 , 0.0241668 , 0.02046467, 0.02053574, 0.01971268,
0.02033148, 0.02034604, 0.02126317, 0.01967574, 0.01968943,
0.01992829, 0.01982134, 0.01667527],
[0.02849185, 0.02828838, 0.02967051, 0.02970099, 0.0296995 ,
0.02898439, 0.02881245, 0.02743928, 0.02688038, 0.02688689,
0.0268874 , 0.02694642, 0.02323871, 0.02316793, 0.0237941 ,
0.0241668 , 0.02046467, 0.02053574, 0.01971268, 0.02033148,
0.02034604, 0.02126317, 0.01967574, 0.01968943, 0.01992829,
0.01982134, 0.01667527, 0.0166734 ],
[0.02828838, 0.02967051, 0.02970099, 0.0296995 , 0.02898439,
0.02881245, 0.02743928, 0.02688038, 0.02688689, 0.0268874 ,
0.02694642, 0.02323871, 0.02316793, 0.0237941 , 0.0241668 ,
0.02046467, 0.02053574, 0.01971268, 0.02033148, 0.02034604,
0.02126317, 0.01967574, 0.01968943, 0.01992829, 0.01982134,
0.01667527, 0.0166734 , 0.01698105],
[0.02967051, 0.02970099, 0.0296995 , 0.02898439, 0.02881245,
0.02743928, 0.02688038, 0.02688689, 0.0268874 , 0.02694642,
0.02323871, 0.02316793, 0.0237941 , 0.0241668 , 0.02046467,
0.02053574, 0.01971268, 0.02033148, 0.02034604, 0.02126317,
0.01967574, 0.01968943, 0.01992829, 0.01982134, 0.01667527,
0.0166734 , 0.01698105, 0.01721507],
[0.02970099, 0.0296995 , 0.02898439, 0.02881245, 0.02743928,
0.02688038, 0.02688689, 0.0268874 , 0.02694642, 0.02323871,
0.02316793, 0.0237941 , 0.0241668 , 0.02046467, 0.02053574,
0.01971268, 0.02033148, 0.02034604, 0.02126317, 0.01967574,
0.01968943, 0.01992829, 0.01982134, 0.01667527, 0.0166734 ,
0.01698105, 0.01721507, 0.01594814],
[0.0296995 , 0.02898439, 0.02881245, 0.02743928, 0.02688038,
0.02688689, 0.0268874 , 0.02694642, 0.02323871, 0.02316793,
0.0237941 , 0.0241668 , 0.02046467, 0.02053574, 0.01971268,
0.02033148, 0.02034604, 0.02126317, 0.01967574, 0.01968943,
0.01992829, 0.01982134, 0.01667527, 0.0166734 , 0.01698105,
0.01721507, 0.01594814, 0.01604526]])
y_test_vola28_28
array([[0.02898439],
[0.02881245],
[0.02743928],
[0.02688038],
[0.02688689],
[0.0268874 ],
[0.02694642],
[0.02323871],
[0.02316793],
[0.0237941 ],
[0.0241668 ],
[0.02046467],
[0.02053574],
[0.01971268],
[0.02033148],
[0.02034604],
[0.02126317],
[0.01967574],
[0.01968943],
[0.01992829],
[0.01982134],
[0.01667527],
[0.0166734 ],
[0.01698105],
[0.01721507],
[0.01594814],
[0.01604526],
[0.01596223]])
Modelos predictivos#
Definición de funciones
La función plot_model() nos permitirá visualizar los conjuntos de entrenamiento, prueba y las predicciones correspondientes al conjunto de prueba. También, en el título de los gráficos, se incluirá la métrica MAE.
import plotly.graph_objects as go
def plot_model(train, val, test, y_pred_val, y_pred_test, title):
    """Plot the train / validation / test series together with the model
    predictions for the validation and test windows.

    Parameters
    ----------
    train, val, test : pandas.Series
        Observed values for each split; their index supplies the x axis.
    y_pred_val, y_pred_test : array-like
        Predictions aligned with ``val`` and ``test`` respectively.
    title : str
        Figure title.
    """
    # (x values, y values, legend label, line style) for every trace;
    # dashed lines distinguish predictions from observed data.
    traces = [
        (train.index, train, 'Entrenamiento', dict(color='blue')),
        (val.index, val, 'Validación', dict(color='orange')),
        (val.index, y_pred_val, 'Predicción Validación', dict(color='red', dash='dash')),
        (test.index, test, 'Prueba', dict(color='green')),
        (test.index, y_pred_test, 'Predicción Prueba', dict(color='purple', dash='dash')),
    ]
    fig = go.Figure()
    for x_vals, y_vals, label, style in traces:
        fig.add_trace(go.Scatter(x=x_vals, y=y_vals, mode='lines', name=label, line=style))
    fig.update_layout(
        title=f"{title}",
        xaxis_title="Tiempo",
        yaxis_title="Valor",
        legend_title="Series",
        template="plotly_white",  # white background
        xaxis=dict(range=[train.index.min(), test.index.max()]),  # clamp x axis to the data span
    )
    # Render the interactive figure.
    fig.show()
A continuación se define función metricas() para estimación de métricas de desempeño.
def metricas(y, out):
    """Compute fit metrics between observed values ``y`` and predictions
    ``out`` and return them as a one-row DataFrame.

    Columns: SSE, MAPE (string with '%'), MAD, MSD, R2 (string with '%').
    MAPE only accumulates terms where the observed value is non-zero,
    but divides by the full sample size.
    """
    # If y arrives as a list of lists / arrays (e.g. column vectors),
    # flatten it into a single sequence first.
    if len(y) > 0 and isinstance(y[0], (list, np.ndarray)):
        y = [item for row in y for item in row]
    out = np.asarray(out)
    y = np.asarray(y)
    n_obs = len(y)
    errors = y - out
    sse = sum(errors ** 2)
    mape = round(100 * sum(abs(errors / y)[y != 0]) / n_obs, 2)
    mad = round(sum(abs(errors)) / n_obs, 2)
    msd = round(sum(errors ** 2) / n_obs, 2)
    r2 = round(r2_score(y, out) * 100, 2)
    # A freshly built DataFrame already carries a clean RangeIndex.
    return pd.DataFrame({
        "SSE": [sse],
        "MAPE": [f"{mape}%"],
        "MAD": [mad],
        "MSD": [msd],
        "R2": [f"{r2}%"],
    })
A continuación se define función evaluate_residuals() para la evaluación de los residuales del conjunto de prueba.
def evaluate_residuals(test_data, predictions):
    """Run Ljung-Box and Jarque-Bera tests on the test-set residuals.

    Prints a plain-language verdict for each test (alpha = 0.05) and
    returns the tuple ``(ljung_box_pval, jarque_bera_pval)``.
    """
    residuals = test_data - predictions
    # Ljung-Box: H0 = residuals are independent (no autocorrelation).
    lb_pval = acorr_ljungbox(residuals)['lb_pvalue'].values[0]
    if lb_pval > 0.05:
        print('No se rechaza H0: los residuales son independientes (no correlacionados).')
    else:
        print('Se rechaza H0: hay autocorrelación en los residuales.')
    # Jarque-Bera: H0 = residuals are normally distributed; index 1 is
    # the p-value of the test statistic.
    jb_pval = jarque_bera(residuals)[1]
    if jb_pval > 0.05:
        print('No se rechaza H0: los residuales siguen una distribución normal.')
    else:
        print('Se rechaza H0: los residuales no siguen una distribución normal.')
    return lb_pval, jb_pval
A continuación se define función evaluate_residuals_train() para la evaluación de los residuales del conjunto de Entrenamiento.
def evaluate_residuals_train(residuals):
    """Run Ljung-Box and Jarque-Bera tests on training residuals,
    printing the statistic, p-values and verdicts (alpha = 0.05).

    Returns the tuple ``(ljung_box_pval, jarque_bera_pval)``.
    """
    lb_results = acorr_ljungbox(residuals)
    lb_pval = lb_results['lb_pvalue'].values[0]
    lb_stat = lb_results['lb_stat'].values[0]
    print('Ljung-Box LB Statistic: %f' % lb_stat)
    print('Ljung-Box p-value: %f' % lb_pval)
    # H0 of Ljung-Box: residuals are independent (no autocorrelation).
    if lb_pval > 0.05:
        print('No se rechaza H0: los residuales son independientes (no correlacionados).')
    else:
        print('Se rechaza H0: hay autocorrelación en los residuales.')
    # H0 of Jarque-Bera: residuals are normally distributed; index 1 is
    # the p-value of the test statistic.
    jb_pval = jarque_bera(residuals)[1]
    print('Jarque-Bera p-value: %f' % jb_pval)
    if jb_pval > 0.05:
        print('No se rechaza H0: los residuales siguen una distribución normal.')
    else:
        print('Se rechaza H0: los residuales no siguen una distribución normal.')
    return lb_pval, jb_pval
A continuación se define función diagnostic_plots() para la evaluación de los residuales del conjunto de entrenamiento.
def diagnostic_plots(y, y_hat):
    """Plot residual diagnostics (trace, QQ plot, PACF) for the training
    set and run the statistical residual tests.

    Parameters
    ----------
    y : array-like
        Observed values (flattened before differencing).
    y_hat : array-like
        Model predictions.

    Returns
    -------
    tuple
        ``(ljung_box_pval, jarque_bera_pval)`` as returned by
        ``evaluate_residuals_train``.
    """
    residuals = y.flatten() - y_hat
    fig, (ax_res, ax_qq, ax_pacf) = plt.subplots(nrows=3, ncols=1, figsize=(8, 8))
    # Residuals over time with a zero reference line.
    ax_res.plot(residuals, label='Residuos', color='#1f77b4')
    ax_res.axhline(0, color='black', linestyle='--', linewidth=1)
    ax_res.set_title('Residuos')
    ax_res.legend()
    # QQ plot against a standardized normal line.
    sm.qqplot(residuals, line='s', ax=ax_qq)
    ax_qq.set_title('QQ Plot de los Residuos')
    # Partial autocorrelation of the residuals (30 lags).
    sm.graphics.tsa.plot_pacf(residuals, lags=30, ax=ax_pacf)
    ax_pacf.set_title('Autocorrelación Parcial de los Residuos')
    plt.suptitle('Diagnóstico de Residuales Conjunto de Entrenamiento Modelo MLP', fontsize=16)
    plt.tight_layout(rect=[0, 0.03, 1, 0.95])  # leave room for the suptitle
    plt.show()
    # Statistical evaluation of the same residuals; p-values returned.
    return evaluate_residuals_train(residuals)
A continuación se define función errores_plots() para la evaluación de los residuales del conjunto de entrenamiento, validación y test.
import numpy as np
import plotly.graph_objects as go
def errores_plots(y_train, y_hat_train, y_val, y_hat_val, y_test, y_hat_test):
    """Draw a grouped boxplot of the residuals of the train / validation
    / test splits, overlaying the mean of each set as a black 'x'.
    """
    # (legend label, residual vector, box color) for each split.
    splits = [
        ('Entrenamiento', y_train.flatten() - y_hat_train, '#30143F'),
        ('Validación', y_val.flatten() - y_hat_val, '#734764'),
        ('Prueba', y_test.flatten() - y_hat_test, '#d98e31'),
    ]
    figbox = go.Figure()
    for label, residuals, color in splits:
        figbox.add_trace(go.Box(y=residuals, name=label, marker_color=color))
    # Mean markers on top of the boxes.
    figbox.add_trace(go.Scatter(
        x=[label for label, _, _ in splits],
        y=[np.mean(residuals) for _, residuals, _ in splits],
        mode='markers',
        name='Media',
        marker=dict(color='black', size=10, symbol='x'),
    ))
    figbox.update_layout(
        title='Boxplot de Residuales con Media',
        xaxis_title='Conjuntos',
        yaxis_title='Residuos',
        boxmode='group',
    )
    figbox.show()
A continuación se define función plot_best_model_validation_loss() para la evaluación del error/score vs las epocas del mejor modelo seleccionado para cada modelo.
def plot_best_model_validation_loss(history_at_LSTM):
    """Pick the model whose training history reached the lowest
    validation loss and plot that model's validation-loss curve.

    Parameters
    ----------
    history_at_LSTM : list of dict
        One history per trained model; only entries containing a
        'val_loss' list are considered.
    """
    # (best val_loss of the run, run index) for every eligible history.
    candidates = [
        (np.min(history['val_loss']), idx)
        for idx, history in enumerate(history_at_LSTM)
        if 'val_loss' in history
    ]
    # Tuple comparison: smallest loss wins, ties resolved by the lower
    # index — same first-minimum behavior as np.argmin.
    _, best_idx = min(candidates)
    loss_curve = history_at_LSTM[best_idx]['val_loss']
    # Epochs are 1-based for plotting.
    df_val_loss = pd.DataFrame({
        'Epoch': range(1, len(loss_curve) + 1),
        'Val_Loss': loss_curve,
    })
    fig = go.Figure()
    fig.add_trace(go.Scatter(
        x=df_val_loss['Epoch'],
        y=df_val_loss['Val_Loss'],
        mode='markers+lines',
        name='Validation Loss',
        marker=dict(size=8),
        line=dict(width=2),
    ))
    fig.update_layout(
        title='Run vs Error/Score',
        xaxis_title='Epocas',
        yaxis_title='Error/Score',
        template='plotly',
        showlegend=True,
    )
    fig.show()
Price: Perceptrones Multicapa (MLP)#
Horizonte de 7 días (\(\tau=7\))#
A continuación, se lleva a cabo la búsqueda de los hiperparámetros que optimizan nuestro modelo utilizando Keras y GridSearch.
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'  # silence TF info/warning logs


def create_mlp_model(activation='tanh', learning_rate=0.001):
    """Build and compile the MLP used for the 7-day price horizon.

    Architecture: 7 inputs -> Dense(32) -> Dense(16) -> Dense(16)
    -> Dropout(0.2) -> linear output, trained with MAE loss and Adam.

    Parameters
    ----------
    activation : str
        Activation function for the hidden layers.
    learning_rate : float
        Step size for the (legacy) Adam optimizer.
    """
    inputs = Input(shape=(7,), dtype='float32')
    hidden = Dense(32, activation=activation)(inputs)
    hidden = Dense(16, activation=activation)(hidden)
    hidden = Dense(16, activation=activation)(hidden)
    # Dropout regularizes the network right before the output layer.
    regularized = Dropout(0.2)(hidden)
    outputs = Dense(1, activation='linear')(regularized)
    mlp = Model(inputs=inputs, outputs=outputs)
    mlp.compile(
        loss='mean_absolute_error',
        optimizer=tf.keras.optimizers.legacy.Adam(learning_rate=learning_rate),
    )
    return mlp
# Grid Search configuration: wrap the Keras builder so scikit-learn's
# GridSearchCV can cross-validate it.
model = KerasRegressor(build_fn=create_mlp_model, epochs=20, batch_size=16, verbose=0)
param_grid = {
    'activation': ['relu'],   # activations to try, e.g. ['relu', 'tanh', 'sigmoid']
    'epochs' : [20, 50, 100],  # epoch counts to try, e.g. [20, 50, 100, 200, 300]
    'learning_rate' : [0.001]  # learning rates to try, e.g. [0.001, 0.01, 0.1, 0.2, 0.3]
}
# NOTE(review): KFold with shuffle=True mixes the temporal order of the
# samples; for time-series data TimeSeriesSplit would avoid look-ahead
# leakage in the cross-validation folds — confirm this is intended.
grid = GridSearchCV(estimator=model, param_grid=param_grid, n_jobs=-1, cv=KFold(n_splits=5, shuffle=True), verbose =2)
# Fit the grid over the 7-day-lag price training set.
grid_result = grid.fit(X_train_price7, y_train_price7)
# Report the best hyper-parameters and CV score.
print(f"Mejor función de activación: {grid_result.best_params_['activation']}")
print(f"Mejor número de epocas: {grid_result.best_params_['epochs']}")
print(f"Mejor Longitud de paso μ (Learning Rate): {grid_result.best_params_['learning_rate']}")
print(f"Mejor Puntuación: {grid_result.best_score_}")
Fitting 5 folds for each of 3 candidates, totalling 15 fits
Mejor función de activación: relu
Mejor número de epocas: 50
Mejor Longitud de paso μ (Learning Rate): 0.001
Mejor Puntuación: -710.8922546386718
A continuación, configuramos la red MLP empleando la API funcional de Keras. Con este método, una capa puede ser designada como la entrada para la siguiente capa en el momento de su definición.
En este contexto, Input es una función utilizada para establecer una capa de entrada en un modelo de red neuronal. La propiedad shape=(7,) define la estructura de los datos de entrada, lo que indica que estos tendrán 7 dimensiones. Por otro lado, dtype='float32' especifica que los elementos de esta capa serán números de punto flotante de 32 bits.
A continuación se definen las capas ocultas
Dense: Se refiere a una capa completamente conectada (fully connected). Unidades (Neuronas): se prueban [10, 100, 1000 y 10000] neuronas en esta capa. dense1: Esta capa recibe como entrada input_layer, que representa la capa inicial del modelo.
Las múltiples capas ocultas y el elevado número de neuronas en cada una permiten a las redes neuronales capturar la complejidad de las relaciones no lineales entre los regresores y la variable objetivo. No obstante, este diseño también puede llevar al sobreajuste, afectando negativamente el rendimiento en los conjuntos de validación o prueba. Para mitigar este riesgo, se emplea la técnica de Dropout, que regula eficazmente las redes neuronales profundas.
Se incorpora una capa Dropout antes de la capa de salida, que apaga aleatoriamente un porcentaje de neuronas. Esta técnica actúa como un método de ensamblaje, similar al bootstrap, evitando que la red dependa en exceso de neuronas específicas y mejorando así su capacidad de generalización. Al igual que un bosque aleatorio, que construye árboles a partir de subconjuntos aleatorios de características, se establece un Dropout del 20, 40, 60 y 80 porciento, eliminando de manera aleatoria esa fracción de neuronas en cada paso.
Por último, la capa de salida predice el valor del día siguiente.
Las capas de entrada, oculta y de salida se combinan en un modelo que permite entrenar y realizar predicciones. Se utiliza el error absoluto medio (MAE) como función de pérdida, tal como se especifica en la compilación del modelo. Para optimizar los pesos de la red, se aplica el algoritmo Adam, que se destaca por su adaptabilidad y popularidad en el entrenamiento de redes neuronales profundas. A diferencia del Gradiente Descendente Estocástico, Adam ajusta individualmente las tasas de aprendizaje para cada peso, actualizándolas en función de medias móviles ponderadas de los gradientes y sus cuadrados.
A continuación se define la función build_models_mlp para la generación de modelos de acuerdo con los parámetros indexados a través de la lista.
import tensorflow as tf
from tensorflow.keras.layers import Input, Dense, Dropout
from tensorflow.keras.models import Model
def build_models_mlp(input_shape, neurons_list, dropout_rates, activation):
    """Build one compiled MLP per (neurons, dropout) combination.

    Bug fix: the original loop ignored ``neurons`` and ``dropout`` and
    always built Dense(32)/Dropout(0.2), producing identical models for
    every grid combination (all summaries showed the same 1,073 params).
    The grid values are now actually applied.

    Parameters
    ----------
    input_shape : int
        Number of input features (lag-window length).
    neurons_list : list of int
        Units of the first hidden layer; one model per value.
    dropout_rates : list of float
        Dropout fraction applied before the output layer.
    activation : str
        Activation function for all hidden layers.

    Returns
    -------
    list
        Compiled Keras models, ordered by (neurons, dropout).
    """
    models = []
    for neurons in neurons_list:
        for dropout in dropout_rates:
            input_layer = Input(shape=(input_shape,), dtype='float32')
            # First hidden layer width comes from the grid; the two
            # narrower layers keep the original fixed 16 units.
            dense1 = Dense(neurons, activation=activation)(input_layer)
            dense2 = Dense(16, activation=activation)(dense1)
            dense3 = Dense(16, activation=activation)(dense2)
            dropout_layer = Dropout(dropout)(dense3)  # grid-driven regularization
            output_layer = Dense(1, activation='linear')(dropout_layer)
            ts_model = Model(inputs=input_layer, outputs=output_layer)
            # Compile with MAE loss and legacy Adam (lr fixed at 0.001).
            optimizer = tf.keras.optimizers.legacy.Adam(learning_rate=0.001)
            ts_model.compile(loss='mean_absolute_error', optimizer=optimizer)
            models.append(ts_model)
            # Print a summary so each architecture can be inspected.
            print(f'Modelo con {neurons} neuronas y dropout {dropout}:')
            ts_model.summary()
            print('-' * 40)
    return models
Indexación de parámetros de la función build_models_mlp con base en lo indicado en el numeral 3 del parcial práctico.
# Parameter grid required by item 3 of the practical exam.
input_shape7 = 7                       # 7-day lag window
neurons_list = [10, 100, 1000, 10000]  # first-hidden-layer widths to try
dropout_rates = [0.2, 0.4, 0.6, 0.8]   # dropout fractions to try
models_MLP7 = build_models_mlp(input_shape7, neurons_list, dropout_rates, 'relu')
Modelo con 10 neuronas y dropout 0.2:
Model: "model_1"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_2 (InputLayer) [(None, 7)] 0
dense_4 (Dense) (None, 32) 256
dense_5 (Dense) (None, 16) 528
dense_6 (Dense) (None, 16) 272
dropout_1 (Dropout) (None, 16) 0
dense_7 (Dense) (None, 1) 17
=================================================================
Total params: 1,073
Trainable params: 1,073
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10 neuronas y dropout 0.4:
Model: "model_2"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_3 (InputLayer) [(None, 7)] 0
dense_8 (Dense) (None, 32) 256
dense_9 (Dense) (None, 16) 528
dense_10 (Dense) (None, 16) 272
dropout_2 (Dropout) (None, 16) 0
dense_11 (Dense) (None, 1) 17
=================================================================
Total params: 1,073
Trainable params: 1,073
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10 neuronas y dropout 0.6:
Model: "model_3"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_4 (InputLayer) [(None, 7)] 0
dense_12 (Dense) (None, 32) 256
dense_13 (Dense) (None, 16) 528
dense_14 (Dense) (None, 16) 272
dropout_3 (Dropout) (None, 16) 0
dense_15 (Dense) (None, 1) 17
=================================================================
Total params: 1,073
Trainable params: 1,073
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10 neuronas y dropout 0.8:
Model: "model_4"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_5 (InputLayer) [(None, 7)] 0
dense_16 (Dense) (None, 32) 256
dense_17 (Dense) (None, 16) 528
dense_18 (Dense) (None, 16) 272
dropout_4 (Dropout) (None, 16) 0
dense_19 (Dense) (None, 1) 17
=================================================================
Total params: 1,073
Trainable params: 1,073
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 100 neuronas y dropout 0.2:
Model: "model_5"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_6 (InputLayer) [(None, 7)] 0
dense_20 (Dense) (None, 32) 256
dense_21 (Dense) (None, 16) 528
dense_22 (Dense) (None, 16) 272
dropout_5 (Dropout) (None, 16) 0
dense_23 (Dense) (None, 1) 17
=================================================================
Total params: 1,073
Trainable params: 1,073
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 100 neuronas y dropout 0.4:
Model: "model_6"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_7 (InputLayer) [(None, 7)] 0
dense_24 (Dense) (None, 32) 256
dense_25 (Dense) (None, 16) 528
dense_26 (Dense) (None, 16) 272
dropout_6 (Dropout) (None, 16) 0
dense_27 (Dense) (None, 1) 17
=================================================================
Total params: 1,073
Trainable params: 1,073
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 100 neuronas y dropout 0.6:
Model: "model_7"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_8 (InputLayer) [(None, 7)] 0
dense_28 (Dense) (None, 32) 256
dense_29 (Dense) (None, 16) 528
dense_30 (Dense) (None, 16) 272
dropout_7 (Dropout) (None, 16) 0
dense_31 (Dense) (None, 1) 17
=================================================================
Total params: 1,073
Trainable params: 1,073
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 100 neuronas y dropout 0.8:
Model: "model_8"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_9 (InputLayer) [(None, 7)] 0
dense_32 (Dense) (None, 32) 256
dense_33 (Dense) (None, 16) 528
dense_34 (Dense) (None, 16) 272
dropout_8 (Dropout) (None, 16) 0
dense_35 (Dense) (None, 1) 17
=================================================================
Total params: 1,073
Trainable params: 1,073
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 1000 neuronas y dropout 0.2:
Model: "model_9"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_10 (InputLayer) [(None, 7)] 0
dense_36 (Dense) (None, 32) 256
dense_37 (Dense) (None, 16) 528
dense_38 (Dense) (None, 16) 272
dropout_9 (Dropout) (None, 16) 0
dense_39 (Dense) (None, 1) 17
=================================================================
Total params: 1,073
Trainable params: 1,073
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 1000 neuronas y dropout 0.4:
Model: "model_10"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_11 (InputLayer) [(None, 7)] 0
dense_40 (Dense) (None, 32) 256
dense_41 (Dense) (None, 16) 528
dense_42 (Dense) (None, 16) 272
dropout_10 (Dropout) (None, 16) 0
dense_43 (Dense) (None, 1) 17
=================================================================
Total params: 1,073
Trainable params: 1,073
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 1000 neuronas y dropout 0.6:
Model: "model_11"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_12 (InputLayer) [(None, 7)] 0
dense_44 (Dense) (None, 32) 256
dense_45 (Dense) (None, 16) 528
dense_46 (Dense) (None, 16) 272
dropout_11 (Dropout) (None, 16) 0
dense_47 (Dense) (None, 1) 17
=================================================================
Total params: 1,073
Trainable params: 1,073
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 1000 neuronas y dropout 0.8:
Model: "model_12"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_13 (InputLayer) [(None, 7)] 0
dense_48 (Dense) (None, 32) 256
dense_49 (Dense) (None, 16) 528
dense_50 (Dense) (None, 16) 272
dropout_12 (Dropout) (None, 16) 0
dense_51 (Dense) (None, 1) 17
=================================================================
Total params: 1,073
Trainable params: 1,073
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10000 neuronas y dropout 0.2:
Model: "model_13"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_14 (InputLayer) [(None, 7)] 0
dense_52 (Dense) (None, 32) 256
dense_53 (Dense) (None, 16) 528
dense_54 (Dense) (None, 16) 272
dropout_13 (Dropout) (None, 16) 0
dense_55 (Dense) (None, 1) 17
=================================================================
Total params: 1,073
Trainable params: 1,073
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10000 neuronas y dropout 0.4:
Model: "model_14"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_15 (InputLayer) [(None, 7)] 0
dense_56 (Dense) (None, 32) 256
dense_57 (Dense) (None, 16) 528
dense_58 (Dense) (None, 16) 272
dropout_14 (Dropout) (None, 16) 0
dense_59 (Dense) (None, 1) 17
=================================================================
Total params: 1,073
Trainable params: 1,073
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10000 neuronas y dropout 0.6:
Model: "model_15"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_16 (InputLayer) [(None, 7)] 0
dense_60 (Dense) (None, 32) 256
dense_61 (Dense) (None, 16) 528
dense_62 (Dense) (None, 16) 272
dropout_15 (Dropout) (None, 16) 0
dense_63 (Dense) (None, 1) 17
=================================================================
Total params: 1,073
Trainable params: 1,073
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10000 neuronas y dropout 0.8:
Model: "model_16"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_17 (InputLayer) [(None, 7)] 0
dense_64 (Dense) (None, 32) 256
dense_65 (Dense) (None, 16) 528
dense_66 (Dense) (None, 16) 272
dropout_16 (Dropout) (None, 16) 0
dense_67 (Dense) (None, 1) 17
=================================================================
Total params: 1,073
Trainable params: 1,073
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Para entrenar el modelo, se utiliza la función fit() en el objeto del modelo, proporcionándole como argumentos X_train y y_train. El proceso de entrenamiento se lleva a cabo a lo largo de un número específico de épocas. Además, el parámetro batch_size especifica cuántas muestras del conjunto de entrenamiento se usarán en cada iteración de retropropagación (backpropagation).
El conjunto de datos de validación se utiliza para evaluar el rendimiento del modelo al finalizar cada época. Se emplea un objeto ModelCheckpoint que monitorea la función de pérdida en el conjunto de validación y almacena el modelo correspondiente a la época en la que esta función alcanza su valor más bajo.
Se define val_loss como el valor de la función de coste para los datos de validación cruzada y loss como el valor de la función de coste para los datos de entrenamiento. save_freq='epoch' indica que el modelo se evaluará y potencialmente se guardará después de cada época.
# Checkpoint callback: after each epoch, persist the full model whenever the
# validation loss improves on the best value seen so far.
from tensorflow.keras.callbacks import ModelCheckpoint

# File-name template; Keras fills in the epoch number and validation loss.
save_weights_at = os.path.join('keras_models', 'PRSA_data_price_MLP7_weights.{epoch:02d}-{val_loss:.4f}.keras')
save_best7 = ModelCheckpoint(
    save_weights_at,
    monitor='val_loss',       # metric watched to decide whether to save
    verbose=2,
    save_best_only=True,      # keep only checkpoints that improve val_loss
    save_weights_only=False,  # save the whole model, not just the weights
    mode='min',               # lower val_loss is better
    save_freq='epoch',        # evaluate the checkpoint condition every epoch
)
A continuación se itera sobre cada uno de los modelos del objeto models_MLP7.
import os
from joblib import dump, load

# Per-model training histories (dicts of per-epoch metrics) for the MLP grid.
history_price_MPL7 = []

# Train each model in models_MLP7, or reload its cached history when a
# previous run already saved it to disk (avoids retraining on re-execution).
for i, model in enumerate(models_MLP7):
    filename = f'history_price_MPL7_model_{i}.joblib'
    if os.path.exists(filename):
        # Cached run: skip training and load the stored history dict.
        model_history = load(filename)
        # BUG FIX: the message printed the literal placeholder instead of the
        # file name; interpolate {filename} as the saved output shows.
        print(f"El archivo '{filename}' ya existe. Se ha cargado el historial del entrenamiento.")
    else:
        model_history = model.fit(x=X_train_price7, y=y_train_price7, batch_size=16, epochs=100,
                                  verbose=2, callbacks=[save_best7], validation_data=(X_val_price7, y_val_price7),
                                  shuffle=True)
        # Persist only the metrics dict, not the whole History object.
        dump(model_history.history, filename)
        print(f"El entrenamiento del modelo {i + 1} se ha completado y el historial ha sido guardado en '{filename}'.")
    # Normalize: append a plain dict whether it was loaded or freshly trained.
    history_price_MPL7.append(model_history if isinstance(model_history, dict) else model_history.history)
El archivo 'history_price_MPL7_model_0.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_price_MPL7_model_1.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_price_MPL7_model_2.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_price_MPL7_model_3.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_price_MPL7_model_4.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_price_MPL7_model_5.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_price_MPL7_model_6.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_price_MPL7_model_7.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_price_MPL7_model_8.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_price_MPL7_model_9.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_price_MPL7_model_10.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_price_MPL7_model_11.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_price_MPL7_model_12.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_price_MPL7_model_13.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_price_MPL7_model_14.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_price_MPL7_model_15.joblib' ya existe. Se ha cargado el historial del entrenamiento.
En este caso, el parámetro verbose=2 indica que se mostrará una línea por cada época durante el entrenamiento. Los valores posibles son: 0 para no mostrar información, 1 para visualizar una barra de progreso y 2 para ver un registro por época.
Además, las muestras se mezclan de manera aleatoria antes de cada época, gracias a la opción shuffle=True.
Una vez completado el entrenamiento, se realizan predicciones sobre el Precio del Bitcoin utilizando los mejores modelos guardados. Las predicciones generadas, que corresponden al Precio del Bitcoin escalado, son transformadas inversamente para recuperar los valores originales. Asimismo, se evalúa la calidad del ajuste mediante métricas de medición como SEE, MAPE, MAD, MSD y R2.
import os
import re
from tensorflow.keras.models import load_model
import numpy as np

# Scan the checkpoint directory and load the saved model whose file name
# encodes the lowest validation loss.
model_dir = 'keras_models'
files = os.listdir(model_dir)
# Checkpoint names embed "<epoch>-<val_loss>" produced by ModelCheckpoint.
pattern = r"PRSA_data_price_MLP7_weights\.(\d+)-([\d\.]+)\.keras"

best_val_loss = float('inf')
best_model_file = None
best_model7 = None

for candidate in files:
    parsed = re.match(pattern, candidate)
    if not parsed:
        continue
    epoch = int(parsed.group(1))       # epoch number (informational only)
    val_loss = float(parsed.group(2))  # validation loss at that epoch
    # Keep the strictly better (lower) validation loss seen so far.
    if val_loss < best_val_loss:
        best_val_loss, best_model_file = val_loss, candidate

if best_model_file:
    best_model_path = os.path.join(model_dir, best_model_file)
    print(f"Cargando el mejor modelo: {best_model_file} con val_loss: {best_val_loss}")
    best_model7 = load_model(best_model_path)
    if best_model7 is None:
        print("Error: No se pudo cargar el mejor modelo.")
else:
    print("No se encontraron archivos de modelos que coincidan con el patrón.")
Cargando el mejor modelo: PRSA_data_price_MLP7_weights.10-1248.9592.keras con val_loss: 1248.9592
A continuación estimamos las predicciones del modelo MLP para la época en donde la función de pérdida alcanza su valor más bajo.
# Inspect which checkpoint files are present in the models directory.
print("Archivos en el directorio 'keras_models':", files)
Archivos en el directorio 'keras_models': ['PRSA_data_vola14_MLP7_weights.04-0.0018.keras', 'PRSA_data_price_MLP21_weights.01-3579.4128.keras', 'PRSA_data_price_LSTM21_weights.01-43062.3047.keras', 'PRSA_data_price_LSTM28_weights.02-42892.2305.keras', 'PRSA_data_price_LSTM_weights.19-67016.3047.keras', 'PRSA_data_at_LSTM28_weights.19-0.0651.keras', 'PRSA_data_vola14_MLP21_weights.20-0.0011.keras', 'PRSA_data_price_LSTM14_weights.06-51394.8750.keras', 'PRSA_data_vola21_LSTM7_weights.04-0.0012.keras', 'PRSA_data_vola7_MLP28_weights.07-0.0033.keras', 'PRSA_data_price_LSTM21_weights.02-43052.0469.keras', 'PRSA_data_at_MLP7_weights.80-0.0780.keras', 'PRSA_data_vola21_LSTM28_weights.02-0.0022.keras', 'PRSA_data_vola21_LSTM14_weights.04-0.0011.keras', 'PRSA_data_vola7_LSTM21_weights.18-0.0029.keras', 'PRSA_data_vola21_LSTM7_weights.08-0.0013.keras', 'PRSA_data_vola28_MLP14_weights.04-0.0013.keras', 'PRSA_data_price_MLP14_weights.52-626.2623.keras', 'PRSA_data_vola7_MLP7_weights.15-0.0041.keras', 'PRSA_data_price_LSTM14_weights.13-51325.8516.keras', 'PRSA_data_vola14_MLP7_weights.02-0.0023.keras', 'PRSA_data_price_LSTM21_weights.11-42962.6562.keras', 'PRSA_data_vola7_LSTM28_weights.10-0.0037.keras', 'PRSA_data_at_LSTM_weights.04-1.2071.keras', 'PRSA_data_price_MLP7_weights.50-1342.5273.keras', 'PRSA_data_price_MLP14_weights.03-5108.7837.keras', 'PRSA_data_vola28_MLP7_weights.14-0.0012.keras', 'PRSA_data_at_MLP7_weights.11-2.4883.keras', 'PRSA_data_price_LSTM28_weights.08-42832.5430.keras', 'PRSA_data_price_MLP28_weights.01-9242.6553.keras', 'PRSA_data_vola21_LSTM28_weights.04-0.0019.keras', 'PRSA_data_price_MLP21_weights.16-1013.2251.keras', 'PRSA_data_price_MLP28_weights.13-1042.5178.keras', 'PRSA_data_vola28_MLP28_weights.34-0.0006.keras', 'PRSA_data_at_MLP7_weights.01-19.6635.keras', 'PRSA_data_at_MLP7_weights.19-1.8379.keras', 'PRSA_data_at_MLP7_weights.12-2.3991.keras', 'PRSA_data_price_MLP7_weights.15-1346.3275.keras', 
'PRSA_data_vola14_MLP28_weights.02-0.0024.keras', 'PRSA_data_at_LSTM21_weights.05-0.7513.keras', 'PRSA_data_vola14_MLP28_weights.07-0.0017.keras', 'PRSA_data_at_MLP28_weights.05-0.0930.keras', 'PRSA_data_at_LSTM28_weights.02-2.0782.keras', 'PRSA_data_price_LSTM14_weights.20-51256.7266.keras', 'PRSA_data_vola21_LSTM21_weights.05-0.0013.keras', 'PRSA_data_price_MLP14_weights.12-1304.7009.keras', 'PRSA_data_vola7_LSTM14_weights.09-0.0044.keras', 'PRSA_data_vola28_MLP28_weights.50-0.0006.keras', 'PRSA_data_vola7_MLP14_weights.15-0.0020.keras', 'PRSA_data_price_MLP14_weights.08-1386.5365.keras', 'PRSA_data_price_LSTM21_weights.09-42982.2383.keras', 'PRSA_data_price_MLP28_weights.11-7366.8994.keras', 'PRSA_data_vola14_MLP7_weights.18-0.0015.keras', 'PRSA_data_price_MLP7_weights.14-1734.7238.keras', 'PRSA_data_at_LSTM28_weights.03-1.3148.keras', 'PRSA_data_price_LSTM14_weights.15-51306.2070.keras', 'PRSA_data_vola21_MLP28_weights.25-0.0010.keras', 'PRSA_data_at_LSTM28_weights.10-0.2820.keras', 'PRSA_data_at_LSTM14_weights.02-2.2175.keras', 'PRSA_data_vola28_LSTM7_weights.08-0.0014.keras', 'PRSA_data_price_MLP28_weights.10-1107.4047.keras', 'PRSA_data_vola28_MLP7_weights.01-0.0068.keras', 'PRSA_data_vola21_LSTM21_weights.03-0.0015.keras', 'PRSA_data_price_MLP7_weights.01-9253.3047.keras', 'PRSA_data_vola21_MLP14_weights.31-0.0007.keras', 'PRSA_data_price_LSTM28_weights.12-42793.6172.keras', 'PRSA_data_price_LSTM14_weights.09-51365.0000.keras', 'PRSA_data_vola28_MLP28_weights.42-0.0006.keras', 'PRSA_data_price_MLP7_weights.05-4792.5054.keras', 'PRSA_data_vola21_LSTM_weights.01-0.0024.keras', 'PRSA_data_price_LSTM28_weights.17-42745.0352.keras', 'PRSA_data_vola28_MLP28_weights.45-0.0006.keras', 'PRSA_data_vola28_MLP14_weights.08-0.0010.keras', 'PRSA_data_at_LSTM_weights.11-0.6498.keras', 'PRSA_data_vola28_MLP28_weights.39-0.0007.keras', 'PRSA_data_at_MLP21_weights.11-0.0326.keras', 'PRSA_data_price_LSTM_weights.20-67006.4141.keras', 
'PRSA_data_vola28_MLP7_weights.06-0.0014.keras', 'PRSA_data_at_LSTM14_weights.06-0.8145.keras', 'PRSA_data_at_MLP28_weights.01-2.1902.keras', 'PRSA_data_price_MLP14_weights.46-814.1992.keras', '.DS_Store', 'PRSA_data_at_LSTM21_weights.01-5.9452.keras', 'PRSA_data_vola28_LSTM28_weights.12-0.0006.keras', 'PRSA_data_price_LSTM14_weights.18-51276.5625.keras', 'PRSA_data_vola21_LSTM14_weights.01-0.0078.keras', 'PRSA_data_vola14_MLP21_weights.04-0.0015.keras', 'PRSA_data_vola21_LSTM7_weights.02-0.0012.keras', 'PRSA_data_vola21_LSTM14_weights.02-0.0011.keras', 'PRSA_data_at_MLP7_weights.09-2.9793.keras', 'PRSA_data_vola21_MLP14_weights.06-0.0007.keras', 'PRSA_data_vola14_MLP21_weights.78-0.0010.keras', 'PRSA_data_price_MLP14_weights.64-711.9107.keras', 'PRSA_data_vola14_MLP28_weights.31-0.0011.keras', 'PRSA_data_vola14_LSTM_weights.08-0.0020.keras', 'PRSA_data_vola14_LSTM21_weights.02-0.0018.keras', 'PRSA_data_at_LSTM21_weights.02-2.0083.keras', 'PRSA_data_vola14_LSTM28_weights.17-0.0014.keras', 'PRSA_data_vola14_MLP14_weights.29-0.0005.keras', 'PRSA_data_price_LSTM_weights.03-67175.7734.keras', 'PRSA_data_vola14_LSTM_weights.01-0.0068.keras', 'PRSA_data_price_LSTM28_weights.11-42803.3086.keras', 'PRSA_data_price_MLP28_weights.15-7107.3306.keras', 'PRSA_data_vola14_MLP21_weights.49-0.0010.keras', 'PRSA_data_vola28_MLP7_weights.07-0.0010.keras', 'PRSA_data_vola21_LSTM7_weights.02-0.0010.keras', 'PRSA_data_at_LSTM14_weights.15-0.4082.keras', 'PRSA_data_vola28_MLP7_weights.19-0.0011.keras', 'PRSA_data_at_LSTM21_weights.19-0.3670.keras', 'PRSA_data_vola14_MLP14_weights.06-0.0006.keras', 'PRSA_data_vola14_LSTM21_weights.06-0.0012.keras', 'PRSA_data_price_LSTM_weights.17-67036.2891.keras', 'PRSA_data_vola28_MLP7_weights.03-0.0018.keras', 'PRSA_data_vola7_MLP28_weights.02-0.0039.keras', 'PRSA_data_vola7_LSTM28_weights.16-0.0035.keras', 'PRSA_data_vola21_MLP7_weights.20-0.0011.keras', 'PRSA_data_vola21_LSTM7_weights.01-0.0018.keras', 
'PRSA_data_vola21_MLP21_weights.33-0.0009.keras', 'PRSA_data_price_MLP21_weights.64-870.7020.keras', 'PRSA_data_vola21_MLP14_weights.22-0.0007.keras', 'PRSA_data_vola7_MLP14_weights.14-0.0020.keras', 'PRSA_data_price_LSTM21_weights.17-42903.9648.keras', 'PRSA_data_vola21_MLP14_weights.01-0.0021.keras', 'PRSA_data_vola21_LSTM7_weights.16-0.0011.keras', 'PRSA_data_at_LSTM21_weights.17-0.5070.keras', 'PRSA_data_at_LSTM14_weights.11-0.6656.keras', 'PRSA_data_vola28_MLP7_weights.02-0.0019.keras', 'PRSA_data_vola14_MLP28_weights.05-0.0018.keras', 'PRSA_data_vola14_LSTM14_weights.08-0.0005.keras', 'PRSA_data_at_MLP21_weights.02-0.8171.keras', 'PRSA_data_price_MLP28_weights.02-8134.2202.keras', 'PRSA_data_price_LSTM28_weights.06-42852.3320.keras', 'PRSA_data_vola7_MLP14_weights.02-0.0036.keras', 'PRSA_data_vola14_LSTM28_weights.01-0.0026.keras', 'PRSA_data_at_MLP7_weights.04-9.8837.keras', 'PRSA_data_price_MLP28_weights.16-1274.4368.keras', 'PRSA_data_price_LSTM21_weights.07-43002.0078.keras', 'PRSA_data_vola7_MLP28_weights.23-0.0032.keras', 'PRSA_data_price_LSTM_weights.01-67196.3359.keras', 'PRSA_data_at_MLP28_weights.02-1.9156.keras', 'PRSA_data_vola14_MLP21_weights.02-0.0016.keras', 'PRSA_data_vola28_MLP14_weights.14-0.0007.keras', 'PRSA_data_price_LSTM28_weights.07-42842.4375.keras', 'PRSA_data_price_MLP14_weights.37-825.1805.keras', 'PRSA_data_vola7_MLP14_weights.07-0.0021.keras', 'PRSA_data_price_LSTM14_weights.04-51414.8945.keras', 'PRSA_data_price_MLP7_weights.42-1446.1339.keras', 'PRSA_data_price_MLP21_weights.57-932.6497.keras', 'PRSA_data_price_LSTM14_weights.19-51266.6133.keras', 'PRSA_data_vola21_MLP14_weights.34-0.0008.keras', 'PRSA_data_price_MLP14_weights.01-10485.8262.keras', 'PRSA_data_vola21_LSTM7_weights.08-0.0012.keras', 'PRSA_data_at_LSTM_weights.02-2.4966.keras', 'PRSA_data_at_LSTM28_weights.08-0.3981.keras', 'PRSA_data_vola21_LSTM28_weights.15-0.0009.keras', 'PRSA_data_price_LSTM_weights.20-67006.3047.keras', 
'PRSA_data_price_LSTM_weights.14-67066.1016.keras', 'PRSA_data_price_LSTM21_weights.19-42884.2422.keras', 'PRSA_data_vola28_LSTM21_weights.18-0.0008.keras', 'PRSA_data_vola21_LSTM7_weights.04-0.0013.keras', 'PRSA_data_price_LSTM28_weights.20-42714.5508.keras', 'PRSA_data_at_LSTM14_weights.13-0.5609.keras', 'PRSA_data_vola28_LSTM28_weights.04-0.0009.keras', 'PRSA_data_vola14_MLP21_weights.01-0.0118.keras', 'PRSA_data_vola7_MLP21_weights.02-0.0032.keras', 'PRSA_data_at_MLP7_weights.74-0.2838.keras', 'PRSA_data_price_MLP28_weights.17-1217.0461.keras', 'PRSA_data_at_MLP21_weights.16-1.1973.keras', 'PRSA_data_vola7_MLP7_weights.02-0.0036.keras', 'PRSA_data_vola21_LSTM7_weights.02-0.0015.keras', 'PRSA_data_vola14_LSTM14_weights.04-0.0006.keras', 'PRSA_data_vola28_LSTM28_weights.18-0.0006.keras', 'PRSA_data_price_MLP21_weights.74-805.9489.keras', 'PRSA_data_vola21_MLP14_weights.02-0.0011.keras', 'PRSA_data_vola28_MLP21_weights.02-0.0012.keras', 'PRSA_data_vola21_LSTM_weights.11-0.0014.keras', 'PRSA_data_vola7_LSTM14_weights.14-0.0036.keras', 'PRSA_data_at_MLP28_weights.03-1.7712.keras', 'PRSA_data_price_LSTM21_weights.20-42874.3555.keras', 'PRSA_data_vola28_LSTM7_weights.20-0.0010.keras', 'PRSA_data_vola28_MLP21_weights.04-0.0009.keras', 'PRSA_data_at_MLP7_weights.08-3.6716.keras', 'PRSA_data_at_MLP21_weights.04-0.0734.keras', 'PRSA_data_vola21_LSTM28_weights.03-0.0021.keras', 'PRSA_data_vola21_MLP14_weights.35-0.0008.keras', 'PRSA_data_vola28_LSTM7_weights.11-0.0012.keras', 'PRSA_data_price_LSTM28_weights.04-42872.1953.keras', 'PRSA_data_at_LSTM_weights.08-0.4803.keras', 'PRSA_data_vola28_MLP14_weights.05-0.0010.keras', 'PRSA_data_price_LSTM14_weights.14-51316.0625.keras', 'PRSA_data_vola7_MLP14_weights.01-0.0101.keras', 'PRSA_data_vola14_LSTM28_weights.17-0.0015.keras', 'PRSA_data_vola7_LSTM_weights.01-0.0040.keras', 'PRSA_data_vola14_MLP21_weights.09-0.0011.keras', 'PRSA_data_price_LSTM21_weights.05-43021.9570.keras', 'PRSA_data_vola28_MLP28_weights.03-0.0007.keras', 
'PRSA_data_vola7_LSTM14_weights.10-0.0042.keras', 'PRSA_data_price_LSTM_weights.07-67135.3359.keras', 'PRSA_data_vola7_MLP14_weights.18-0.0020.keras', 'PRSA_data_price_LSTM21_weights.18-42894.1055.keras', 'PRSA_data_vola7_MLP7_weights.01-0.0042.keras', 'PRSA_data_vola14_LSTM_weights.05-0.0024.keras', 'PRSA_data_vola21_MLP14_weights.27-0.0008.keras', 'PRSA_data_vola28_MLP28_weights.02-0.0018.keras', 'PRSA_data_vola21_LSTM7_weights.03-0.0017.keras', 'PRSA_data_price_LSTM_weights.10-67105.4297.keras', 'PRSA_data_vola28_MLP7_weights.19-0.0010.keras', 'PRSA_data_vola7_MLP21_weights.03-0.0030.keras', 'PRSA_data_at_MLP7_weights.06-5.3724.keras', 'PRSA_data_at_LSTM_weights.05-1.1226.keras', 'PRSA_data_vola21_LSTM7_weights.01-0.0026.keras', 'PRSA_data_vola21_LSTM28_weights.05-0.0018.keras', 'PRSA_data_vola14_MLP21_weights.05-0.0012.keras', 'PRSA_data_vola21_LSTM28_weights.14-0.0010.keras', 'PRSA_data_vola21_MLP14_weights.08-0.0010.keras', 'PRSA_data_vola21_MLP28_weights.47-0.0008.keras', 'PRSA_data_price_LSTM14_weights.16-51296.3633.keras', 'PRSA_data_at_MLP7_weights.02-16.3421.keras', 'PRSA_data_vola7_LSTM21_weights.15-0.0030.keras', 'PRSA_data_vola21_MLP21_weights.09-0.0009.keras', 'PRSA_data_at_LSTM14_weights.01-6.2168.keras', 'PRSA_data_vola28_LSTM21_weights.01-0.0016.keras', 'PRSA_data_vola21_LSTM21_weights.16-0.0009.keras', 'PRSA_data_vola21_LSTM28_weights.07-0.0016.keras', 'PRSA_data_vola21_MLP28_weights.19-0.0010.keras', 'PRSA_data_vola21_LSTM28_weights.14-0.0009.keras', 'PRSA_data_vola14_MLP7_weights.09-0.0018.keras', 'PRSA_data_vola21_MLP21_weights.02-0.0011.keras', 'PRSA_data_vola28_LSTM14_weights.03-0.0007.keras', 'PRSA_data_vola21_LSTM_weights.09-0.0015.keras', 'PRSA_data_vola21_LSTM7_weights.09-0.0012.keras', 'PRSA_data_vola28_MLP21_weights.65-0.0007.keras', 'PRSA_data_vola21_MLP28_weights.15-0.0011.keras', 'PRSA_data_at_LSTM28_weights.04-0.7851.keras', 'PRSA_data_at_MLP7_weights.42-0.8694.keras', 'PRSA_data_vola21_LSTM28_weights.18-0.0008.keras', 
'PRSA_data_vola21_MLP14_weights.04-0.0008.keras', 'PRSA_data_vola7_LSTM14_weights.02-0.0040.keras', 'PRSA_data_vola21_LSTM14_weights.12-0.0007.keras', 'PRSA_data_vola7_LSTM21_weights.15-0.0029.keras', 'PRSA_data_vola21_MLP28_weights.01-0.0026.keras', 'PRSA_data_price_LSTM_weights.13-67075.9453.keras', 'PRSA_data_vola14_MLP7_weights.01-0.0041.keras', 'PRSA_data_price_MLP21_weights.02-1476.1034.keras', 'PRSA_data_vola14_LSTM28_weights.19-0.0014.keras', 'PRSA_data_price_MLP14_weights.96-663.0834.keras', 'PRSA_data_vola7_MLP21_weights.94-0.0024.keras', 'PRSA_data_price_LSTM21_weights.03-43041.9570.keras', 'PRSA_data_price_MLP7_weights.12-3798.9531.keras', 'PRSA_data_price_LSTM28_weights.10-42813.0352.keras', 'PRSA_data_vola14_MLP28_weights.20-0.0014.keras', 'PRSA_data_vola7_MLP21_weights.04-0.0027.keras', 'PRSA_data_vola7_LSTM28_weights.18-0.0035.keras', 'PRSA_data_at_MLP7_weights.10-2.7632.keras', 'PRSA_data_at_LSTM21_weights.18-0.3561.keras', 'PRSA_data_price_LSTM_weights.06-67145.4141.keras', 'PRSA_data_price_LSTM28_weights.13-42783.9805.keras', 'PRSA_data_vola21_MLP7_weights.13-0.0012.keras', 'PRSA_data_price_LSTM28_weights.01-42902.4570.keras', 'PRSA_data_price_LSTM_weights.20-67005.9609.keras', 'PRSA_data_vola7_LSTM21_weights.04-0.0034.keras', 'PRSA_data_at_MLP14_weights.03-0.0461.keras', 'PRSA_data_at_LSTM21_weights.07-0.5113.keras', 'PRSA_data_vola28_MLP28_weights.01-0.0042.keras', 'PRSA_data_vola7_LSTM14_weights.01-0.0455.keras', 'PRSA_data_price_MLP28_weights.17-1182.9105.keras', 'PRSA_data_vola7_MLP14_weights.05-0.0018.keras', 'PRSA_data_vola28_MLP14_weights.38-0.0006.keras', 'PRSA_data_vola28_MLP7_weights.10-0.0013.keras', 'PRSA_data_vola7_LSTM21_weights.01-0.0038.keras', 'PRSA_data_price_LSTM14_weights.17-51286.4570.keras', 'PRSA_data_price_LSTM28_weights.16-42754.8008.keras', 'PRSA_data_price_LSTM14_weights.11-51345.4023.keras', 'PRSA_data_price_MLP14_weights.22-638.4409.keras', 'PRSA_data_vola7_MLP28_weights.32-0.0029.keras', 
'PRSA_data_price_MLP7_weights.48-1648.1864.keras', 'PRSA_data_vola28_LSTM28_weights.10-0.0007.keras', 'PRSA_data_vola21_LSTM_weights.03-0.0021.keras', 'PRSA_data_vola21_LSTM21_weights.13-0.0010.keras', 'PRSA_data_at_MLP7_weights.07-4.2627.keras', 'PRSA_data_vola7_LSTM21_weights.17-0.0029.keras', 'PRSA_data_vola21_LSTM28_weights.08-0.0011.keras', 'PRSA_data_price_MLP21_weights.05-1072.5081.keras', 'PRSA_data_at_LSTM21_weights.06-0.6219.keras', 'PRSA_data_vola7_LSTM21_weights.05-0.0030.keras', 'PRSA_data_price_MLP14_weights.29-856.8320.keras', 'PRSA_data_vola14_LSTM21_weights.11-0.0011.keras', 'PRSA_data_price_LSTM21_weights.10-42972.4219.keras', 'PRSA_data_at_LSTM21_weights.19-0.2744.keras', 'PRSA_data_price_LSTM_weights.02-67185.9453.keras', 'PRSA_data_vola28_MLP14_weights.21-0.0007.keras', 'PRSA_data_vola21_LSTM28_weights.16-0.0009.keras', 'PRSA_data_vola28_LSTM21_weights.10-0.0009.keras', 'PRSA_data_price_LSTM28_weights.15-42764.5898.keras', 'PRSA_data_at_LSTM14_weights.07-0.7059.keras', 'PRSA_data_vola21_LSTM21_weights.01-0.0048.keras', 'PRSA_data_at_LSTM14_weights.03-1.3422.keras', 'PRSA_data_vola14_LSTM14_weights.02-0.0015.keras', 'PRSA_data_vola14_MLP14_weights.02-0.0007.keras', 'PRSA_data_vola7_LSTM21_weights.02-0.0034.keras', 'PRSA_data_vola7_MLP21_weights.21-0.0025.keras', 'PRSA_data_at_MLP7_weights.18-1.8396.keras', 'PRSA_data_price_LSTM_weights.11-67095.6172.keras', 'PRSA_data_at_LSTM14_weights.12-0.6378.keras', 'PRSA_data_vola21_LSTM21_weights.07-0.0012.keras', 'PRSA_data_vola7_MLP28_weights.45-0.0030.keras', 'PRSA_data_at_MLP7_weights.26-0.8897.keras', 'PRSA_data_vola21_MLP7_weights.01-0.0014.keras', 'PRSA_data_price_MLP14_weights.19-982.5739.keras', 'PRSA_data_vola14_LSTM_weights.16-0.0017.keras', 'PRSA_data_price_MLP21_weights.94-813.8954.keras', 'PRSA_data_vola7_LSTM21_weights.20-0.0029.keras', 'PRSA_data_at_LSTM_weights.03-1.4842.keras', 'PRSA_data_vola21_MLP28_weights.02-0.0015.keras', 'PRSA_data_price_LSTM21_weights.12-42952.9453.keras', 
'PRSA_data_price_MLP28_weights.17-1019.2816.keras', 'PRSA_data_vola14_LSTM_weights.13-0.0019.keras', 'PRSA_data_vola7_LSTM_weights.02-0.0038.keras', 'PRSA_data_vola7_LSTM28_weights.02-0.0042.keras', 'PRSA_data_at_LSTM28_weights.12-0.0705.keras', 'PRSA_data_at_LSTM14_weights.13-0.2270.keras', 'PRSA_data_price_LSTM_weights.18-67026.2891.keras', 'PRSA_data_vola28_MLP21_weights.66-0.0007.keras', 'PRSA_data_at_MLP14_weights.42-0.0373.keras', 'PRSA_data_vola7_MLP14_weights.38-0.0020.keras', 'PRSA_data_vola7_LSTM21_weights.11-0.0029.keras', 'PRSA_data_vola21_LSTM21_weights.12-0.0010.keras', 'PRSA_data_at_LSTM_weights.14-0.5158.keras', 'PRSA_data_vola14_LSTM21_weights.01-0.0019.keras', 'PRSA_data_vola28_LSTM21_weights.11-0.0009.keras', 'PRSA_data_vola7_LSTM28_weights.05-0.0038.keras', 'PRSA_data_vola14_MLP14_weights.10-0.0005.keras', 'PRSA_data_at_MLP21_weights.03-2.1540.keras', 'PRSA_data_at_LSTM_weights.16-0.5377.keras', 'PRSA_data_vola21_LSTM7_weights.13-0.0013.keras', 'PRSA_data_vola21_MLP28_weights.44-0.0008.keras', 'PRSA_data_at_MLP7_weights.16-1.9941.keras', 'PRSA_data_vola28_LSTM28_weights.01-0.0010.keras', 'PRSA_data_vola28_LSTM14_weights.19-0.0006.keras', 'PRSA_data_price_LSTM21_weights.14-42933.4180.keras', 'PRSA_data_vola14_LSTM28_weights.04-0.0019.keras', 'PRSA_data_price_MLP21_weights.82-828.7976.keras', 'PRSA_data_vola28_MLP21_weights.01-0.0030.keras', 'PRSA_data_price_LSTM14_weights.10-51355.2070.keras', 'PRSA_data_vola21_MLP21_weights.18-0.0010.keras', 'PRSA_data_vola28_MLP21_weights.73-0.0007.keras', 'PRSA_data_vola28_MLP7_weights.20-0.0011.keras', 'PRSA_data_price_LSTM21_weights.13-42943.1719.keras', 'PRSA_data_vola28_MLP28_weights.36-0.0007.keras', 'PRSA_data_vola7_MLP14_weights.04-0.0025.keras', 'PRSA_data_at_MLP14_weights.02-0.2715.keras', 'PRSA_data_price_LSTM14_weights.05-51404.8945.keras', 'PRSA_data_price_LSTM28_weights.19-42725.4297.keras', 'PRSA_data_price_MLP28_weights.01-1561.6656.keras', 'PRSA_data_vola21_MLP7_weights.14-0.0011.keras', 
'PRSA_data_vola7_MLP7_weights.23-0.0033.keras', 'PRSA_data_vola14_MLP28_weights.01-0.0026.keras', 'PRSA_data_at_MLP14_weights.29-0.0323.keras', 'PRSA_data_vola7_MLP21_weights.01-0.0033.keras', 'PRSA_data_vola21_LSTM_weights.19-0.0014.keras', 'PRSA_data_price_MLP7_weights.03-5953.8076.keras', 'PRSA_data_price_MLP21_weights.42-948.6628.keras', 'PRSA_data_vola21_LSTM14_weights.07-0.0008.keras', 'PRSA_data_vola14_LSTM28_weights.05-0.0018.keras', 'PRSA_data_vola7_LSTM28_weights.13-0.0037.keras', 'PRSA_data_vola21_MLP21_weights.01-0.0016.keras', 'PRSA_data_vola14_LSTM21_weights.08-0.0011.keras', 'PRSA_data_price_LSTM21_weights.06-43011.9570.keras', 'PRSA_data_vola7_MLP14_weights.05-0.0019.keras', 'PRSA_data_vola7_MLP28_weights.16-0.0033.keras', 'PRSA_data_vola21_LSTM28_weights.16-0.0008.keras', 'PRSA_data_vola28_MLP14_weights.01-0.0031.keras', 'PRSA_data_vola14_MLP28_weights.11-0.0015.keras', 'PRSA_data_price_LSTM14_weights.03-51425.0000.keras', 'PRSA_data_vola7_LSTM28_weights.04-0.0039.keras', 'PRSA_data_vola28_LSTM7_weights.13-0.0011.keras', 'PRSA_data_price_LSTM14_weights.01-51445.4883.keras', 'PRSA_data_vola28_MLP28_weights.22-0.0006.keras', 'PRSA_data_price_LSTM_weights.09-67115.3359.keras', 'PRSA_data_vola28_LSTM7_weights.14-0.0011.keras', 'PRSA_data_vola7_LSTM14_weights.04-0.0050.keras', 'PRSA_data_vola21_MLP28_weights.17-0.0010.keras', 'PRSA_data_vola28_LSTM21_weights.09-0.0010.keras', 'PRSA_data_vola21_MLP14_weights.13-0.0010.keras', 'PRSA_data_vola14_MLP14_weights.01-0.0013.keras', 'PRSA_data_vola21_MLP28_weights.50-0.0009.keras', 'PRSA_data_at_LSTM_weights.19-0.2184.keras', 'PRSA_data_vola14_LSTM_weights.02-0.0030.keras', 'PRSA_data_vola7_MLP28_weights.39-0.0030.keras', 'PRSA_data_price_LSTM21_weights.04-43031.9414.keras', 'PRSA_data_price_LSTM_weights.15-67056.2109.keras', 'PRSA_data_at_MLP7_weights.20-1.6814.keras', 'PRSA_data_vola28_LSTM7_weights.18-0.0010.keras', 'PRSA_data_at_MLP7_weights.95-0.2130.keras', 
'PRSA_data_vola14_LSTM28_weights.03-0.0021.keras', 'PRSA_data_at_LSTM_weights.01-7.5653.keras', 'PRSA_data_at_MLP7_weights.05-7.2110.keras', 'PRSA_data_vola21_LSTM28_weights.20-0.0008.keras', 'PRSA_data_at_LSTM_weights.07-0.8569.keras', 'PRSA_data_vola14_LSTM28_weights.12-0.0014.keras', 'PRSA_data_price_LSTM21_weights.16-42913.7969.keras', 'PRSA_data_at_MLP28_weights.04-1.3772.keras', 'PRSA_data_price_LSTM_weights.08-67125.3047.keras', 'PRSA_data_price_LSTM14_weights.07-51384.8633.keras', 'PRSA_data_price_LSTM_weights.04-67165.6016.keras', 'PRSA_data_vola14_LSTM28_weights.15-0.0014.keras', 'PRSA_data_at_MLP14_weights.01-0.4195.keras', 'PRSA_data_price_LSTM28_weights.09-42822.7578.keras', 'PRSA_data_price_LSTM28_weights.03-42882.1953.keras', 'PRSA_data_at_LSTM14_weights.05-0.8408.keras', 'PRSA_data_vola7_MLP28_weights.08-0.0030.keras', 'PRSA_data_price_MLP14_weights.07-3004.8474.keras', 'PRSA_data_price_LSTM21_weights.15-42923.6367.keras', 'PRSA_data_vola7_MLP21_weights.81-0.0025.keras', 'PRSA_data_price_MLP21_weights.86-819.7106.keras', 'PRSA_data_vola28_MLP7_weights.10-0.0010.keras', 'PRSA_data_vola21_MLP28_weights.05-0.0012.keras', 'PRSA_data_vola21_MLP14_weights.20-0.0007.keras', 'PRSA_data_vola14_MLP28_weights.42-0.0011.keras', 'PRSA_data_price_LSTM_weights.12-67085.7578.keras', 'PRSA_data_vola28_MLP21_weights.13-0.0008.keras', 'PRSA_data_vola14_MLP28_weights.08-0.0016.keras', 'PRSA_data_at_LSTM28_weights.05-0.5407.keras', 'PRSA_data_at_MLP7_weights.86-0.0883.keras', 'PRSA_data_vola28_LSTM14_weights.01-0.0020.keras', 'PRSA_data_vola21_MLP21_weights.71-0.0010.keras', 'PRSA_data_at_MLP28_weights.09-0.1537.keras', 'PRSA_data_vola7_MLP28_weights.48-0.0030.keras', 'PRSA_data_at_MLP28_weights.12-0.0354.keras', 'PRSA_data_at_LSTM14_weights.04-0.9732.keras', 'PRSA_data_at_LSTM14_weights.15-0.2154.keras', 'PRSA_data_vola14_MLP28_weights.21-0.0013.keras', 'PRSA_data_vola21_MLP14_weights.25-0.0009.keras', 'PRSA_data_vola14_LSTM14_weights.01-0.0027.keras', 
'PRSA_data_at_LSTM21_weights.20-0.0855.keras', 'PRSA_data_vola21_LSTM28_weights.01-0.0039.keras', 'PRSA_data_price_MLP14_weights.02-8059.2510.keras', 'PRSA_data_vola21_MLP7_weights.13-0.0013.keras', 'PRSA_data_vola28_MLP7_weights.04-0.0016.keras', 'PRSA_data_vola21_LSTM21_weights.02-0.0043.keras', 'PRSA_data_price_MLP14_weights.78-683.9230.keras', 'PRSA_data_price_MLP21_weights.19-1002.5925.keras', 'PRSA_data_vola28_MLP21_weights.92-0.0007.keras', 'PRSA_data_price_LSTM28_weights.14-42774.2500.keras', 'PRSA_data_vola21_MLP21_weights.06-0.0010.keras', 'PRSA_data_vola14_LSTM14_weights.03-0.0010.keras', 'PRSA_data_price_LSTM_weights.16-67046.2422.keras', 'PRSA_data_vola7_LSTM28_weights.07-0.0037.keras', 'PRSA_data_vola28_LSTM7_weights.01-0.0017.keras', 'PRSA_data_vola28_LSTM14_weights.02-0.0008.keras', 'PRSA_data_vola7_MLP14_weights.05-0.0022.keras', 'PRSA_data_at_MLP21_weights.03-0.2039.keras', 'PRSA_data_vola14_LSTM28_weights.02-0.0023.keras', 'PRSA_data_at_MLP7_weights.03-13.2212.keras', 'PRSA_data_vola7_LSTM14_weights.04-0.0037.keras', 'PRSA_data_vola14_MLP21_weights.69-0.0010.keras', 'PRSA_data_vola21_LSTM7_weights.18-0.0013.keras', 'PRSA_data_vola7_MLP21_weights.49-0.0025.keras', 'PRSA_data_vola28_MLP28_weights.31-0.0006.keras', 'PRSA_data_price_MLP21_weights.77-841.5739.keras', 'PRSA_data_vola28_MLP14_weights.35-0.0006.keras', 'PRSA_data_vola7_MLP28_weights.26-0.0031.keras', 'PRSA_data_vola7_LSTM14_weights.01-0.0065.keras', 'PRSA_data_price_LSTM14_weights.02-51435.1133.keras', 'PRSA_data_price_LSTM28_weights.20-42714.7695.keras', 'PRSA_data_vola28_LSTM28_weights.16-0.0006.keras', 'PRSA_data_vola14_LSTM28_weights.06-0.0016.keras', 'PRSA_data_vola21_MLP21_weights.13-0.0010.keras', 'PRSA_data_vola28_MLP14_weights.32-0.0006.keras', 'PRSA_data_vola7_MLP28_weights.01-0.0043.keras', 'PRSA_data_vola21_MLP28_weights.08-0.0011.keras', 'PRSA_data_vola21_LSTM28_weights.10-0.0011.keras', 'PRSA_data_vola21_LSTM7_weights.14-0.0012.keras', 
'PRSA_data_vola7_LSTM28_weights.01-0.0049.keras', 'PRSA_data_price_LSTM28_weights.20-42715.6250.keras', 'PRSA_data_vola21_LSTM7_weights.13-0.0012.keras', 'PRSA_data_price_LSTM_weights.05-67155.5078.keras', 'PRSA_data_price_MLP14_weights.14-1073.6493.keras', 'PRSA_data_vola14_LSTM_weights.03-0.0029.keras', 'PRSA_data_vola14_MLP7_weights.07-0.0021.keras', 'PRSA_data_at_MLP28_weights.03-0.0637.keras', 'PRSA_data_vola21_MLP28_weights.03-0.0012.keras', 'PRSA_data_at_MLP28_weights.05-0.1823.keras', 'PRSA_data_at_MLP21_weights.01-0.9762.keras', 'PRSA_data_vola7_LSTM28_weights.15-0.0035.keras', 'PRSA_data_price_LSTM21_weights.08-42992.0703.keras', 'PRSA_data_vola21_MLP21_weights.22-0.0010.keras', 'PRSA_data_at_LSTM28_weights.01-6.0769.keras', 'PRSA_data_vola28_LSTM28_weights.20-0.0006.keras', 'PRSA_data_at_MLP21_weights.01-4.3241.keras', 'PRSA_data_at_LSTM_weights.06-0.8671.keras', 'PRSA_data_price_MLP7_weights.37-1654.4709.keras', 'PRSA_data_at_LSTM_weights.20-0.4626.keras', 'PRSA_data_vola21_MLP28_weights.27-0.0009.keras', 'PRSA_data_at_LSTM_weights.19-0.4729.keras', 'PRSA_data_at_MLP7_weights.24-1.4718.keras', 'PRSA_data_at_MLP7_weights.59-0.4636.keras', 'PRSA_data_vola14_LSTM14_weights.06-0.0005.keras', 'PRSA_data_at_MLP7_weights.14-2.2322.keras', 'PRSA_data_vola7_LSTM14_weights.02-0.0196.keras', 'PRSA_data_at_LSTM14_weights.19-0.3356.keras', 'PRSA_data_price_LSTM28_weights.18-42735.2305.keras', 'PRSA_data_at_LSTM21_weights.04-0.8083.keras', 'PRSA_data_price_LSTM14_weights.12-51335.6523.keras', 'PRSA_data_vola21_LSTM_weights.06-0.0017.keras', 'PRSA_data_price_LSTM28_weights.05-42862.2383.keras', 'PRSA_data_price_MLP7_weights.10-1248.9592.keras', 'PRSA_data_vola21_MLP28_weights.35-0.0009.keras', 'PRSA_data_vola28_LSTM21_weights.03-0.0013.keras', 'PRSA_data_at_LSTM21_weights.03-1.0323.keras', 'PRSA_data_price_MLP14_weights.04-5028.7607.keras', 'PRSA_data_price_LSTM14_weights.08-51374.8945.keras', 'PRSA_data_vola28_MLP14_weights.48-0.0007.keras']
# Generate predictions with the best checkpoint, if it was loaded.
if best_model7 is None:
    print("No se puede realizar predicciones porque el mejor modelo no se ha cargado.")
else:
    # Predict on the train / validation / test splits and drop the
    # trailing singleton dimension that Keras adds to its output.
    train_preds_price_MLP7 = np.squeeze(best_model7.predict(X_train_price7))
    val_preds_price_MLP7 = np.squeeze(best_model7.predict(X_val_price7))
    test_preds_price_MLP7 = np.squeeze(best_model7.predict(X_test_price7))
    # Show the raw predictions for each split.
    print("Predicciones de Entrenamiento:", train_preds_price_MLP7)
    print("Predicciones de validación:", val_preds_price_MLP7)
    print("Predicciones de prueba:", test_preds_price_MLP7)
1/156 [..............................] - ETA: 5s
156/156 [==============================] - 0s 163us/step
1/1 [==============================] - ETA: 0s
1/1 [==============================] - 0s 5ms/step
1/1 [==============================] - ETA: 0s
1/1 [==============================] - 0s 5ms/step
Predicciones de Entrenamiento: [2.7667654e-01 2.7667654e-01 2.7667654e-01 ... 6.0041758e+04 6.2077590e+04
6.3053477e+04]
Predicciones de validación: [64795.645 66291.18 65770.01 66731.305 66945.586 69419.24 68908.664]
Predicciones de prueba: [69146.27 71416.43 72183.79 73269.2 73095.1 73247.4 71013.57]
A continuación se indexan los parámetros sobre la función data_plot() para un horizonte de 7 días (\(\tau = 7\)).
# Split the price series into train/validation/test segments for plotting,
# using a 7-day horizon.
data_train_plot_price7, data_val_plot_price7, data_test_plot_price7 = data_plot(df_1_st['Price'], 7)
Datos de entrenamiento:
4998 0.1
4997 0.1
4996 0.1
4995 0.1
4994 0.1
...
18 66080.4
17 66855.3
16 68172.0
15 68366.5
14 68964.8
Name: Price, Length: 4985, dtype: float64
Datos de validación:
13 72099.1
12 71470.2
11 73066.3
10 71387.5
9 69463.7
8 65314.2
7 68391.2
Name: Price, dtype: float64
Datos de prueba:
6 67594.1
5 62050.0
4 67854.0
3 65503.8
2 63785.5
1 64037.8
0 67211.9
Name: Price, dtype: float64
# Plot the last 100 training points together with the validation/test data
# and the MLP predictions for the 7-day horizon.
plot_model(data_train_plot_price7[-100:], data_val_plot_price7, data_test_plot_price7, val_preds_price_MLP7, test_preds_price_MLP7, "Predicciones usando Perceptrón Multicapa (MLP) para un horizonte de 7 días")
A continuación realizamos un análisis sobre los residuales y rendimiento de nuestro modelo con relación a nuestro conjunto de Entrenamiento(rendimiento) y test (rendimiento y residuales).
Conjunto de datos de Entrenamiento
A continuación se realiza el análisis de los residuales para el conjunto de entrenamiento.
# Residual diagnostics on the training set: Ljung-Box (autocorrelation)
# and Jarque-Bera (normality) p-values.
ljung_box_pval_MLP_train7, jarque_bera_pval_MLP_train7 = diagnostic_plots(y_train_price7, train_preds_price_MLP7)
Ljung-Box LB Statistic: 2250.445657
Ljung-Box p-value: 0.000000
Se rechaza H0: hay autocorrelación en los residuales.
Jarque-Bera p-value: 0.000000
Se rechaza H0: los residuales no siguen una distribución normal.
# Compute fit metrics for the training split and attach the residual-test p-values.
train_label = 'MLP Entrenamiento Price τ = 7'
metrica_price_MLP_train = metricas(y_train_price7, train_preds_price_MLP7).rename(index={0: train_label})
metrica_price_MLP_train['Ljung-Box p-value'] = pd.Series([ljung_box_pval_MLP_train7], index=metrica_price_MLP_train.index)
metrica_price_MLP_train['Jarque-Bera p-value'] = pd.Series([jarque_bera_pval_MLP_train7], index=metrica_price_MLP_train.index)
metrica_price_MLP_train
| SSE | MAPE | MAD | MSD | R2 | Ljung-Box p-value | Jarque-Bera p-value | |
|---|---|---|---|---|---|---|---|
| MLP Entrenamiento Price τ = 7 | 4.5824e+09 | 9.01% | 416.59 | 921829.51 | 99.62% | 0.0 | 0.0 |
Conjunto de datos de Prueba:
# Residual diagnostics on the test set (independence and normality tests).
ljung_box_pvalMLP7, jarque_bera_pvalMLP7 = evaluate_residuals(data_test_plot_price7, test_preds_price_MLP7)
No se rechaza H0: los residuales son independientes (no correlacionados).
No se rechaza H0: los residuales siguen una distribución normal.
# Compute fit metrics for the test split and attach the residual-test p-values.
test_label = 'MLP Prueba Price τ = 7'
metrica_MLP7_test = metricas(y_test_price7, test_preds_price_MLP7).rename(index={0: test_label})
metrica_MLP7_test['Ljung-Box p-value'] = pd.Series([ljung_box_pvalMLP7], index=metrica_MLP7_test.index)
metrica_MLP7_test['Jarque-Bera p-value'] = pd.Series([jarque_bera_pvalMLP7], index=metrica_MLP7_test.index)
metrica_MLP7_test
| SSE | MAPE | MAD | MSD | R2 | Ljung-Box p-value | Jarque-Bera p-value | |
|---|---|---|---|---|---|---|---|
| MLP Prueba Price τ = 7 | 9.6041e+07 | 4.17% | 2851.11 | 1.3720e+07 | -125.85% | 0.314 | 0.6674 |
Curva Runs vs Error/Score :
# Training-history curve: validation loss per epoch for the MLP τ=7 runs.
plot_best_model_validation_loss(history_price_MPL7)
Boxplot Errores Conjunto de Entrenamiento, Validación y Prueba :
# Boxplots of the prediction errors on the train, validation and test splits.
errores_plots(y_train_price7, train_preds_price_MLP7, y_val_price7, val_preds_price_MLP7, y_test_price7, test_preds_price_MLP7)
De acuerdo a los resultados obtenidos del gráfico de Run vs Error/Score y al compararlo con el gráfico boxplot de los residuales se observa lo siguiente:
Conjunto de Entrenamiento: La media de los residuales se encuentra alejada considerablemente del error/score correspondiente al val_loss de la época del mejor modelo.
Conjunto de Validación: La media de los residuales se encuentra alejada considerablemente del error/score correspondiente al val_loss de la época del mejor modelo.
Conjunto de Prueba: La media de los residuales se encuentra alejada considerablemente del error/score correspondiente al val_loss de la época del mejor modelo. De igual forma se observa una alta variabilidad en los residuales.
Lo anterior es coherente con las métricas obtenidas para el modelo implementado.
Horizonte de 14 días (\(\tau=14\))#
A continuación, se lleva a cabo la búsqueda de los hiperparámetros que optimizan nuestro modelo utilizando Keras y GridSearch.
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # Esto ignorará todos los logs de info y warnings.
def create_mlp_model(activation='tanh', learning_rate=0.001):
    """Build and compile the 14-lag MLP used by the grid search.

    Architecture: Input(14) -> Dense(32) -> Dense(16) -> Dense(16)
    -> Dropout(0.2) -> Dense(1, linear). Compiled with MAE loss and
    the (legacy) Adam optimizer.

    Parameters
    ----------
    activation : str
        Activation for the three hidden layers.
    learning_rate : float
        Step size for the Adam optimizer.
    """
    inputs = Input(shape=(14,), dtype='float32')  # one feature per lag
    x = inputs
    for units in (32, 16, 16):  # hidden stack
        x = Dense(units, activation=activation)(x)
    x = Dropout(0.2)(x)  # regularization before the head
    outputs = Dense(1, activation='linear')(x)  # single-value regression head
    mlp = Model(inputs=inputs, outputs=outputs)
    mlp.compile(loss='mean_absolute_error',
                optimizer=tf.keras.optimizers.legacy.Adam(learning_rate=learning_rate))
    return mlp
# Grid Search configuration: wrap the Keras builder so sklearn can cross-validate it.
model = KerasRegressor(build_fn=create_mlp_model, epochs=20, batch_size=16, verbose=0)
param_grid = {
'activation': ['relu'], # activation functions to try; full set: ['relu', 'tanh', 'sigmoid']
'epochs' : [20], # epochs to try; full set: [20, 50, 100, 200, 300]
'learning_rate' : [0.001] # learning rates to try; full set: [0.001, 0.01, 0.1, 0.2, 0.3]
}
# 5-fold shuffled CV over the (here single-candidate) grid, using all cores.
grid = GridSearchCV(estimator=model, param_grid=param_grid, n_jobs=-1, cv=KFold(n_splits=5, shuffle=True), verbose =2)
# Fit the grid search on the 14-lag training set.
grid_result = grid.fit(X_train_price14, y_train_price14)
# Report the best hyperparameters found.
print(f"Mejor función de activación: {grid_result.best_params_['activation']}")
print(f"Mejor número de epocas: {grid_result.best_params_['epochs']}")
print(f"Mejor Longitud de paso μ (Learning Rate): {grid_result.best_params_['learning_rate']}")
print(f"Mejor Puntuación: {grid_result.best_score_}")
Fitting 5 folds for each of 1 candidates, totalling 5 fits
Mejor función de activación: relu
Mejor número de epocas: 20
Mejor Longitud de paso μ (Learning Rate): 0.001
Mejor Puntuación: -632.2492492675781
A continuación, configuramos la red MLP empleando la API funcional de Keras. Con este método, una capa puede ser designada como la entrada para la siguiente capa en el momento de su definición.
En este contexto, Input es una función utilizada para establecer una capa de entrada en un modelo de red neuronal. La propiedad shape=(14,) define la estructura de los datos de entrada, lo que indica que estos tendrán 14 dimensiones. Por otro lado, dtype='float32' especifica que los elementos de esta capa serán números de punto flotante de 32 bits
A continuación se define la función build_models_mlp para la generación de modelos de acuerdo con los parámetros indexados a través de la lista.
La indexación de parámetros de la función build_models_mlp se realiza con base en lo indicado en el numeral 3 del parcial práctico.
# Input dimensionality equals the look-back window (tau = 14 lags).
input_shape14 = 14
neurons_list = [10, 100, 1000, 10000]  # candidate hidden-layer widths
dropout_rates = [0.2, 0.4, 0.6, 0.8]  # candidate dropout rates
# Build one model per (neurons, dropout) combination with ReLU activations.
# NOTE(review): the printed summaries below all report 1,297 params regardless
# of the neurons value, which suggests build_models_mlp ignores neurons_list —
# confirm against its definition.
models_MLP14 = build_models_mlp(input_shape14, neurons_list, dropout_rates, 'relu')
Modelo con 10 neuronas y dropout 0.2:
Model: "model_18"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_19 (InputLayer) [(None, 14)] 0
dense_72 (Dense) (None, 32) 480
dense_73 (Dense) (None, 16) 528
dense_74 (Dense) (None, 16) 272
dropout_18 (Dropout) (None, 16) 0
dense_75 (Dense) (None, 1) 17
=================================================================
Total params: 1,297
Trainable params: 1,297
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10 neuronas y dropout 0.4:
Model: "model_19"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_20 (InputLayer) [(None, 14)] 0
dense_76 (Dense) (None, 32) 480
dense_77 (Dense) (None, 16) 528
dense_78 (Dense) (None, 16) 272
dropout_19 (Dropout) (None, 16) 0
dense_79 (Dense) (None, 1) 17
=================================================================
Total params: 1,297
Trainable params: 1,297
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10 neuronas y dropout 0.6:
Model: "model_20"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_21 (InputLayer) [(None, 14)] 0
dense_80 (Dense) (None, 32) 480
dense_81 (Dense) (None, 16) 528
dense_82 (Dense) (None, 16) 272
dropout_20 (Dropout) (None, 16) 0
dense_83 (Dense) (None, 1) 17
=================================================================
Total params: 1,297
Trainable params: 1,297
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10 neuronas y dropout 0.8:
Model: "model_21"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_22 (InputLayer) [(None, 14)] 0
dense_84 (Dense) (None, 32) 480
dense_85 (Dense) (None, 16) 528
dense_86 (Dense) (None, 16) 272
dropout_21 (Dropout) (None, 16) 0
dense_87 (Dense) (None, 1) 17
=================================================================
Total params: 1,297
Trainable params: 1,297
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 100 neuronas y dropout 0.2:
Model: "model_22"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_23 (InputLayer) [(None, 14)] 0
dense_88 (Dense) (None, 32) 480
dense_89 (Dense) (None, 16) 528
dense_90 (Dense) (None, 16) 272
dropout_22 (Dropout) (None, 16) 0
dense_91 (Dense) (None, 1) 17
=================================================================
Total params: 1,297
Trainable params: 1,297
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 100 neuronas y dropout 0.4:
Model: "model_23"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_24 (InputLayer) [(None, 14)] 0
dense_92 (Dense) (None, 32) 480
dense_93 (Dense) (None, 16) 528
dense_94 (Dense) (None, 16) 272
dropout_23 (Dropout) (None, 16) 0
dense_95 (Dense) (None, 1) 17
=================================================================
Total params: 1,297
Trainable params: 1,297
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 100 neuronas y dropout 0.6:
Model: "model_24"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_25 (InputLayer) [(None, 14)] 0
dense_96 (Dense) (None, 32) 480
dense_97 (Dense) (None, 16) 528
dense_98 (Dense) (None, 16) 272
dropout_24 (Dropout) (None, 16) 0
dense_99 (Dense) (None, 1) 17
=================================================================
Total params: 1,297
Trainable params: 1,297
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 100 neuronas y dropout 0.8:
Model: "model_25"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_26 (InputLayer) [(None, 14)] 0
dense_100 (Dense) (None, 32) 480
dense_101 (Dense) (None, 16) 528
dense_102 (Dense) (None, 16) 272
dropout_25 (Dropout) (None, 16) 0
dense_103 (Dense) (None, 1) 17
=================================================================
Total params: 1,297
Trainable params: 1,297
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 1000 neuronas y dropout 0.2:
Model: "model_26"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_27 (InputLayer) [(None, 14)] 0
dense_104 (Dense) (None, 32) 480
dense_105 (Dense) (None, 16) 528
dense_106 (Dense) (None, 16) 272
dropout_26 (Dropout) (None, 16) 0
dense_107 (Dense) (None, 1) 17
=================================================================
Total params: 1,297
Trainable params: 1,297
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 1000 neuronas y dropout 0.4:
Model: "model_27"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_28 (InputLayer) [(None, 14)] 0
dense_108 (Dense) (None, 32) 480
dense_109 (Dense) (None, 16) 528
dense_110 (Dense) (None, 16) 272
dropout_27 (Dropout) (None, 16) 0
dense_111 (Dense) (None, 1) 17
=================================================================
Total params: 1,297
Trainable params: 1,297
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 1000 neuronas y dropout 0.6:
Model: "model_28"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_29 (InputLayer) [(None, 14)] 0
dense_112 (Dense) (None, 32) 480
dense_113 (Dense) (None, 16) 528
dense_114 (Dense) (None, 16) 272
dropout_28 (Dropout) (None, 16) 0
dense_115 (Dense) (None, 1) 17
=================================================================
Total params: 1,297
Trainable params: 1,297
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 1000 neuronas y dropout 0.8:
Model: "model_29"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_30 (InputLayer) [(None, 14)] 0
dense_116 (Dense) (None, 32) 480
dense_117 (Dense) (None, 16) 528
dense_118 (Dense) (None, 16) 272
dropout_29 (Dropout) (None, 16) 0
dense_119 (Dense) (None, 1) 17
=================================================================
Total params: 1,297
Trainable params: 1,297
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10000 neuronas y dropout 0.2:
Model: "model_30"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_31 (InputLayer) [(None, 14)] 0
dense_120 (Dense) (None, 32) 480
dense_121 (Dense) (None, 16) 528
dense_122 (Dense) (None, 16) 272
dropout_30 (Dropout) (None, 16) 0
dense_123 (Dense) (None, 1) 17
=================================================================
Total params: 1,297
Trainable params: 1,297
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10000 neuronas y dropout 0.4:
Model: "model_31"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_32 (InputLayer) [(None, 14)] 0
dense_124 (Dense) (None, 32) 480
dense_125 (Dense) (None, 16) 528
dense_126 (Dense) (None, 16) 272
dropout_31 (Dropout) (None, 16) 0
dense_127 (Dense) (None, 1) 17
=================================================================
Total params: 1,297
Trainable params: 1,297
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10000 neuronas y dropout 0.6:
Model: "model_32"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_33 (InputLayer) [(None, 14)] 0
dense_128 (Dense) (None, 32) 480
dense_129 (Dense) (None, 16) 528
dense_130 (Dense) (None, 16) 272
dropout_32 (Dropout) (None, 16) 0
dense_131 (Dense) (None, 1) 17
=================================================================
Total params: 1,297
Trainable params: 1,297
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10000 neuronas y dropout 0.8:
Model: "model_33"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_34 (InputLayer) [(None, 14)] 0
dense_132 (Dense) (None, 32) 480
dense_133 (Dense) (None, 16) 528
dense_134 (Dense) (None, 16) 272
dropout_33 (Dropout) (None, 16) 0
dense_135 (Dense) (None, 1) 17
=================================================================
Total params: 1,297
Trainable params: 1,297
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Para entrenar el modelo, se utiliza la función fit() en el objeto del modelo, proporcionándole como argumentos X_train y y_train. El proceso de entrenamiento se lleva a cabo a lo largo de un número específico de épocas. Además, el parámetro batch_size especifica cuántas muestras del conjunto de entrenamiento se usarán en cada iteración de retropropagación (backpropagation).
El conjunto de datos de validación se utiliza para evaluar el rendimiento del modelo al finalizar cada época. Se emplea un objeto ModelCheckpoint que monitorea la función de pérdida en el conjunto de validación y almacena el modelo correspondiente a la época en la que esta función alcanza su valor más bajo.
Se define val_loss como el valor de la función de coste para los datos de validación cruzada y loss como el valor de la función de coste para los datos de entrenamiento. period=1 indica que el modelo se evaluará y potencialmente se guardará después de cada época.
from tensorflow.keras.callbacks import ModelCheckpoint
# Checkpoint path template: epoch number and validation loss are embedded in the filename.
save_weights_at = os.path.join('keras_models', 'PRSA_data_price_MLP14_weights.{epoch:02d}-{val_loss:.4f}.keras')
# Evaluate at the end of every epoch and keep only models that improve val_loss.
save_best14 = ModelCheckpoint(save_weights_at, monitor='val_loss', verbose=2,
save_best_only=True, save_weights_only=False, mode='min', save_freq='epoch');
A continuación se itera sobre cada uno de los modelos del objeto models_MLP14.
import os
from joblib import dump, load

# Training histories (one dict per candidate model) for the tau = 14 MLPs.
history_price_MPL14 = []
# Train each model in models_MLP14, or reload its cached history if one exists.
for i, model in enumerate(models_MLP14):
    filename = f'history_price_MPL14_model_{i}.joblib'
    if os.path.exists(filename):
        # Cached run: skip retraining and load the saved history dict.
        model_history = load(filename)
        # Fix: the message previously printed the literal text '(unknown)' —
        # the captured output shows it should interpolate the filename.
        print(f"El archivo '{filename}' ya existe. Se ha cargado el historial del entrenamiento.")
    else:
        model_history = model.fit(x=X_train_price14, y=y_train_price14, batch_size=16, epochs=100,
                                  verbose=2, callbacks=[save_best14], validation_data=(X_val_price14, y_val_price14),
                                  shuffle=True)
        # Persist only the history dict (the History object itself is not picklable cleanly).
        dump(model_history.history, filename)
        print(f"El entrenamiento del modelo {i + 1} se ha completado y el historial ha sido guardado en '{filename}'.")
    # Normalize: append a plain dict whether it came from disk or from a fresh fit.
    history_price_MPL14.append(model_history if isinstance(model_history, dict) else model_history.history)
El archivo 'history_price_MPL14_model_0.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_price_MPL14_model_1.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_price_MPL14_model_2.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_price_MPL14_model_3.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_price_MPL14_model_4.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_price_MPL14_model_5.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_price_MPL14_model_6.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_price_MPL14_model_7.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_price_MPL14_model_8.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_price_MPL14_model_9.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_price_MPL14_model_10.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_price_MPL14_model_11.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_price_MPL14_model_12.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_price_MPL14_model_13.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_price_MPL14_model_14.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_price_MPL14_model_15.joblib' ya existe. Se ha cargado el historial del entrenamiento.
En este caso, el parámetro verbose=2 indica que se mostrará una línea por cada época durante el entrenamiento. Los valores posibles son: 0 para no mostrar información, 1 para visualizar una barra de progreso y 2 para ver un registro por época.
Además, las muestras se mezclan de manera aleatoria antes de cada época, gracias a la opción shuffle=True.
Una vez completado el entrenamiento, se realizan predicciones sobre el Precio del Bitcoin utilizando los mejores modelos guardados. Las predicciones generadas, que corresponden al Precio del Bitcoin escalado, son transformadas inversamente para recuperar los valores originales. Asimismo, se evalúa la calidad del ajuste mediante el uso de métricas de medición como SSE, MAPE, MAD, MSD y R2.
import os
import re
from tensorflow.keras.models import load_model
import numpy as np

# Directory where the ModelCheckpoint callback saved the candidate models.
model_dir = 'keras_models'
files = os.listdir(model_dir)
# Filenames encode the epoch and the validation loss: <prefix>.<epoch>-<val_loss>.keras
pattern = r"PRSA_data_price_MLP14_weights\.(\d+)-([\d\.]+)\.keras"

best_val_loss = float('inf')
best_model_file = None
best_model14 = None

# Scan the checkpoints and remember the file with the lowest validation loss.
for file in files:
    match = re.match(pattern, file)
    if not match:
        continue
    epoch = int(match.group(1))       # epoch number parsed from the filename
    val_loss = float(match.group(2))  # validation loss parsed from the filename
    if val_loss < best_val_loss:
        best_val_loss, best_model_file = val_loss, file

# Load the winning checkpoint, if any matched the pattern.
if best_model_file:
    best_model_path = os.path.join(model_dir, best_model_file)
    print(f"Cargando el mejor modelo: {best_model_file} con val_loss: {best_val_loss}")
    best_model14 = load_model(best_model_path)
    if best_model14 is None:
        print("Error: No se pudo cargar el mejor modelo.")
else:
    print("No se encontraron archivos de modelos que coincidan con el patrón.")
Cargando el mejor modelo: PRSA_data_price_MLP14_weights.52-626.2623.keras con val_loss: 626.2623
A continuación estimamos las predicciones del modelo MLP para la época en donde la función de pérdida alcanza su valor más bajo.
print("Archivos en el directorio 'keras_models':", files)
Archivos en el directorio 'keras_models': ['PRSA_data_vola14_MLP7_weights.04-0.0018.keras', 'PRSA_data_price_MLP21_weights.01-3579.4128.keras', 'PRSA_data_price_LSTM21_weights.01-43062.3047.keras', 'PRSA_data_price_LSTM28_weights.02-42892.2305.keras', 'PRSA_data_price_LSTM_weights.19-67016.3047.keras', 'PRSA_data_at_LSTM28_weights.19-0.0651.keras', 'PRSA_data_vola14_MLP21_weights.20-0.0011.keras', 'PRSA_data_price_LSTM14_weights.06-51394.8750.keras', 'PRSA_data_vola21_LSTM7_weights.04-0.0012.keras', 'PRSA_data_vola7_MLP28_weights.07-0.0033.keras', 'PRSA_data_price_LSTM21_weights.02-43052.0469.keras', 'PRSA_data_at_MLP7_weights.80-0.0780.keras', 'PRSA_data_vola21_LSTM28_weights.02-0.0022.keras', 'PRSA_data_vola21_LSTM14_weights.04-0.0011.keras', 'PRSA_data_vola7_LSTM21_weights.18-0.0029.keras', 'PRSA_data_vola21_LSTM7_weights.08-0.0013.keras', 'PRSA_data_vola28_MLP14_weights.04-0.0013.keras', 'PRSA_data_price_MLP14_weights.52-626.2623.keras', 'PRSA_data_vola7_MLP7_weights.15-0.0041.keras', 'PRSA_data_price_LSTM14_weights.13-51325.8516.keras', 'PRSA_data_vola14_MLP7_weights.02-0.0023.keras', 'PRSA_data_price_LSTM21_weights.11-42962.6562.keras', 'PRSA_data_vola7_LSTM28_weights.10-0.0037.keras', 'PRSA_data_at_LSTM_weights.04-1.2071.keras', 'PRSA_data_price_MLP7_weights.50-1342.5273.keras', 'PRSA_data_price_MLP14_weights.03-5108.7837.keras', 'PRSA_data_vola28_MLP7_weights.14-0.0012.keras', 'PRSA_data_at_MLP7_weights.11-2.4883.keras', 'PRSA_data_price_LSTM28_weights.08-42832.5430.keras', 'PRSA_data_price_MLP28_weights.01-9242.6553.keras', 'PRSA_data_vola21_LSTM28_weights.04-0.0019.keras', 'PRSA_data_price_MLP21_weights.16-1013.2251.keras', 'PRSA_data_price_MLP28_weights.13-1042.5178.keras', 'PRSA_data_vola28_MLP28_weights.34-0.0006.keras', 'PRSA_data_at_MLP7_weights.01-19.6635.keras', 'PRSA_data_at_MLP7_weights.19-1.8379.keras', 'PRSA_data_at_MLP7_weights.12-2.3991.keras', 'PRSA_data_price_MLP7_weights.15-1346.3275.keras', 
'PRSA_data_vola14_MLP28_weights.02-0.0024.keras', 'PRSA_data_at_LSTM21_weights.05-0.7513.keras', 'PRSA_data_vola14_MLP28_weights.07-0.0017.keras', 'PRSA_data_at_MLP28_weights.05-0.0930.keras', 'PRSA_data_at_LSTM28_weights.02-2.0782.keras', 'PRSA_data_price_LSTM14_weights.20-51256.7266.keras', 'PRSA_data_vola21_LSTM21_weights.05-0.0013.keras', 'PRSA_data_price_MLP14_weights.12-1304.7009.keras', 'PRSA_data_vola7_LSTM14_weights.09-0.0044.keras', 'PRSA_data_vola28_MLP28_weights.50-0.0006.keras', 'PRSA_data_vola7_MLP14_weights.15-0.0020.keras', 'PRSA_data_price_MLP14_weights.08-1386.5365.keras', 'PRSA_data_price_LSTM21_weights.09-42982.2383.keras', 'PRSA_data_price_MLP28_weights.11-7366.8994.keras', 'PRSA_data_vola14_MLP7_weights.18-0.0015.keras', 'PRSA_data_price_MLP7_weights.14-1734.7238.keras', 'PRSA_data_at_LSTM28_weights.03-1.3148.keras', 'PRSA_data_price_LSTM14_weights.15-51306.2070.keras', 'PRSA_data_vola21_MLP28_weights.25-0.0010.keras', 'PRSA_data_at_LSTM28_weights.10-0.2820.keras', 'PRSA_data_at_LSTM14_weights.02-2.2175.keras', 'PRSA_data_vola28_LSTM7_weights.08-0.0014.keras', 'PRSA_data_price_MLP28_weights.10-1107.4047.keras', 'PRSA_data_vola28_MLP7_weights.01-0.0068.keras', 'PRSA_data_vola21_LSTM21_weights.03-0.0015.keras', 'PRSA_data_price_MLP7_weights.01-9253.3047.keras', 'PRSA_data_vola21_MLP14_weights.31-0.0007.keras', 'PRSA_data_price_LSTM28_weights.12-42793.6172.keras', 'PRSA_data_price_LSTM14_weights.09-51365.0000.keras', 'PRSA_data_vola28_MLP28_weights.42-0.0006.keras', 'PRSA_data_price_MLP7_weights.05-4792.5054.keras', 'PRSA_data_vola21_LSTM_weights.01-0.0024.keras', 'PRSA_data_price_LSTM28_weights.17-42745.0352.keras', 'PRSA_data_vola28_MLP28_weights.45-0.0006.keras', 'PRSA_data_vola28_MLP14_weights.08-0.0010.keras', 'PRSA_data_at_LSTM_weights.11-0.6498.keras', 'PRSA_data_vola28_MLP28_weights.39-0.0007.keras', 'PRSA_data_at_MLP21_weights.11-0.0326.keras', 'PRSA_data_price_LSTM_weights.20-67006.4141.keras', 
'PRSA_data_vola28_MLP7_weights.06-0.0014.keras', 'PRSA_data_at_LSTM14_weights.06-0.8145.keras', 'PRSA_data_at_MLP28_weights.01-2.1902.keras', 'PRSA_data_price_MLP14_weights.46-814.1992.keras', '.DS_Store', 'PRSA_data_at_LSTM21_weights.01-5.9452.keras', 'PRSA_data_vola28_LSTM28_weights.12-0.0006.keras', 'PRSA_data_price_LSTM14_weights.18-51276.5625.keras', 'PRSA_data_vola21_LSTM14_weights.01-0.0078.keras', 'PRSA_data_vola14_MLP21_weights.04-0.0015.keras', 'PRSA_data_vola21_LSTM7_weights.02-0.0012.keras', 'PRSA_data_vola21_LSTM14_weights.02-0.0011.keras', 'PRSA_data_at_MLP7_weights.09-2.9793.keras', 'PRSA_data_vola21_MLP14_weights.06-0.0007.keras', 'PRSA_data_vola14_MLP21_weights.78-0.0010.keras', 'PRSA_data_price_MLP14_weights.64-711.9107.keras', 'PRSA_data_vola14_MLP28_weights.31-0.0011.keras', 'PRSA_data_vola14_LSTM_weights.08-0.0020.keras', 'PRSA_data_vola14_LSTM21_weights.02-0.0018.keras', 'PRSA_data_at_LSTM21_weights.02-2.0083.keras', 'PRSA_data_vola14_LSTM28_weights.17-0.0014.keras', 'PRSA_data_vola14_MLP14_weights.29-0.0005.keras', 'PRSA_data_price_LSTM_weights.03-67175.7734.keras', 'PRSA_data_vola14_LSTM_weights.01-0.0068.keras', 'PRSA_data_price_LSTM28_weights.11-42803.3086.keras', 'PRSA_data_price_MLP28_weights.15-7107.3306.keras', 'PRSA_data_vola14_MLP21_weights.49-0.0010.keras', 'PRSA_data_vola28_MLP7_weights.07-0.0010.keras', 'PRSA_data_vola21_LSTM7_weights.02-0.0010.keras', 'PRSA_data_at_LSTM14_weights.15-0.4082.keras', 'PRSA_data_vola28_MLP7_weights.19-0.0011.keras', 'PRSA_data_at_LSTM21_weights.19-0.3670.keras', 'PRSA_data_vola14_MLP14_weights.06-0.0006.keras', 'PRSA_data_vola14_LSTM21_weights.06-0.0012.keras', 'PRSA_data_price_LSTM_weights.17-67036.2891.keras', 'PRSA_data_vola28_MLP7_weights.03-0.0018.keras', 'PRSA_data_vola7_MLP28_weights.02-0.0039.keras', 'PRSA_data_vola7_LSTM28_weights.16-0.0035.keras', 'PRSA_data_vola21_MLP7_weights.20-0.0011.keras', 'PRSA_data_vola21_LSTM7_weights.01-0.0018.keras', 
'PRSA_data_vola21_MLP21_weights.33-0.0009.keras', 'PRSA_data_price_MLP21_weights.64-870.7020.keras', 'PRSA_data_vola21_MLP14_weights.22-0.0007.keras', 'PRSA_data_vola7_MLP14_weights.14-0.0020.keras', 'PRSA_data_price_LSTM21_weights.17-42903.9648.keras', 'PRSA_data_vola21_MLP14_weights.01-0.0021.keras', 'PRSA_data_vola21_LSTM7_weights.16-0.0011.keras', 'PRSA_data_at_LSTM21_weights.17-0.5070.keras', 'PRSA_data_at_LSTM14_weights.11-0.6656.keras', 'PRSA_data_vola28_MLP7_weights.02-0.0019.keras', 'PRSA_data_vola14_MLP28_weights.05-0.0018.keras', 'PRSA_data_vola14_LSTM14_weights.08-0.0005.keras', 'PRSA_data_at_MLP21_weights.02-0.8171.keras', 'PRSA_data_price_MLP28_weights.02-8134.2202.keras', 'PRSA_data_price_LSTM28_weights.06-42852.3320.keras', 'PRSA_data_vola7_MLP14_weights.02-0.0036.keras', 'PRSA_data_vola14_LSTM28_weights.01-0.0026.keras', 'PRSA_data_at_MLP7_weights.04-9.8837.keras', 'PRSA_data_price_MLP28_weights.16-1274.4368.keras', 'PRSA_data_price_LSTM21_weights.07-43002.0078.keras', 'PRSA_data_vola7_MLP28_weights.23-0.0032.keras', 'PRSA_data_price_LSTM_weights.01-67196.3359.keras', 'PRSA_data_at_MLP28_weights.02-1.9156.keras', 'PRSA_data_vola14_MLP21_weights.02-0.0016.keras', 'PRSA_data_vola28_MLP14_weights.14-0.0007.keras', 'PRSA_data_price_LSTM28_weights.07-42842.4375.keras', 'PRSA_data_price_MLP14_weights.37-825.1805.keras', 'PRSA_data_vola7_MLP14_weights.07-0.0021.keras', 'PRSA_data_price_LSTM14_weights.04-51414.8945.keras', 'PRSA_data_price_MLP7_weights.42-1446.1339.keras', 'PRSA_data_price_MLP21_weights.57-932.6497.keras', 'PRSA_data_price_LSTM14_weights.19-51266.6133.keras', 'PRSA_data_vola21_MLP14_weights.34-0.0008.keras', 'PRSA_data_price_MLP14_weights.01-10485.8262.keras', 'PRSA_data_vola21_LSTM7_weights.08-0.0012.keras', 'PRSA_data_at_LSTM_weights.02-2.4966.keras', 'PRSA_data_at_LSTM28_weights.08-0.3981.keras', 'PRSA_data_vola21_LSTM28_weights.15-0.0009.keras', 'PRSA_data_price_LSTM_weights.20-67006.3047.keras', 
'PRSA_data_price_LSTM_weights.14-67066.1016.keras', 'PRSA_data_price_LSTM21_weights.19-42884.2422.keras', 'PRSA_data_vola28_LSTM21_weights.18-0.0008.keras', 'PRSA_data_vola21_LSTM7_weights.04-0.0013.keras', 'PRSA_data_price_LSTM28_weights.20-42714.5508.keras', 'PRSA_data_at_LSTM14_weights.13-0.5609.keras', 'PRSA_data_vola28_LSTM28_weights.04-0.0009.keras', 'PRSA_data_vola14_MLP21_weights.01-0.0118.keras', 'PRSA_data_vola7_MLP21_weights.02-0.0032.keras', 'PRSA_data_at_MLP7_weights.74-0.2838.keras', 'PRSA_data_price_MLP28_weights.17-1217.0461.keras', 'PRSA_data_at_MLP21_weights.16-1.1973.keras', 'PRSA_data_vola7_MLP7_weights.02-0.0036.keras', 'PRSA_data_vola21_LSTM7_weights.02-0.0015.keras', 'PRSA_data_vola14_LSTM14_weights.04-0.0006.keras', 'PRSA_data_vola28_LSTM28_weights.18-0.0006.keras', 'PRSA_data_price_MLP21_weights.74-805.9489.keras', 'PRSA_data_vola21_MLP14_weights.02-0.0011.keras', 'PRSA_data_vola28_MLP21_weights.02-0.0012.keras', 'PRSA_data_vola21_LSTM_weights.11-0.0014.keras', 'PRSA_data_vola7_LSTM14_weights.14-0.0036.keras', 'PRSA_data_at_MLP28_weights.03-1.7712.keras', 'PRSA_data_price_LSTM21_weights.20-42874.3555.keras', 'PRSA_data_vola28_LSTM7_weights.20-0.0010.keras', 'PRSA_data_vola28_MLP21_weights.04-0.0009.keras', 'PRSA_data_at_MLP7_weights.08-3.6716.keras', 'PRSA_data_at_MLP21_weights.04-0.0734.keras', 'PRSA_data_vola21_LSTM28_weights.03-0.0021.keras', 'PRSA_data_vola21_MLP14_weights.35-0.0008.keras', 'PRSA_data_vola28_LSTM7_weights.11-0.0012.keras', 'PRSA_data_price_LSTM28_weights.04-42872.1953.keras', 'PRSA_data_at_LSTM_weights.08-0.4803.keras', 'PRSA_data_vola28_MLP14_weights.05-0.0010.keras', 'PRSA_data_price_LSTM14_weights.14-51316.0625.keras', 'PRSA_data_vola7_MLP14_weights.01-0.0101.keras', 'PRSA_data_vola14_LSTM28_weights.17-0.0015.keras', 'PRSA_data_vola7_LSTM_weights.01-0.0040.keras', 'PRSA_data_vola14_MLP21_weights.09-0.0011.keras', 'PRSA_data_price_LSTM21_weights.05-43021.9570.keras', 'PRSA_data_vola28_MLP28_weights.03-0.0007.keras', 
'PRSA_data_vola7_LSTM14_weights.10-0.0042.keras', 'PRSA_data_price_LSTM_weights.07-67135.3359.keras', 'PRSA_data_vola7_MLP14_weights.18-0.0020.keras', 'PRSA_data_price_LSTM21_weights.18-42894.1055.keras', 'PRSA_data_vola7_MLP7_weights.01-0.0042.keras', 'PRSA_data_vola14_LSTM_weights.05-0.0024.keras', 'PRSA_data_vola21_MLP14_weights.27-0.0008.keras', 'PRSA_data_vola28_MLP28_weights.02-0.0018.keras', 'PRSA_data_vola21_LSTM7_weights.03-0.0017.keras', 'PRSA_data_price_LSTM_weights.10-67105.4297.keras', 'PRSA_data_vola28_MLP7_weights.19-0.0010.keras', 'PRSA_data_vola7_MLP21_weights.03-0.0030.keras', 'PRSA_data_at_MLP7_weights.06-5.3724.keras', 'PRSA_data_at_LSTM_weights.05-1.1226.keras', 'PRSA_data_vola21_LSTM7_weights.01-0.0026.keras', 'PRSA_data_vola21_LSTM28_weights.05-0.0018.keras', 'PRSA_data_vola14_MLP21_weights.05-0.0012.keras', 'PRSA_data_vola21_LSTM28_weights.14-0.0010.keras', 'PRSA_data_vola21_MLP14_weights.08-0.0010.keras', 'PRSA_data_vola21_MLP28_weights.47-0.0008.keras', 'PRSA_data_price_LSTM14_weights.16-51296.3633.keras', 'PRSA_data_at_MLP7_weights.02-16.3421.keras', 'PRSA_data_vola7_LSTM21_weights.15-0.0030.keras', 'PRSA_data_vola21_MLP21_weights.09-0.0009.keras', 'PRSA_data_at_LSTM14_weights.01-6.2168.keras', 'PRSA_data_vola28_LSTM21_weights.01-0.0016.keras', 'PRSA_data_vola21_LSTM21_weights.16-0.0009.keras', 'PRSA_data_vola21_LSTM28_weights.07-0.0016.keras', 'PRSA_data_vola21_MLP28_weights.19-0.0010.keras', 'PRSA_data_vola21_LSTM28_weights.14-0.0009.keras', 'PRSA_data_vola14_MLP7_weights.09-0.0018.keras', 'PRSA_data_vola21_MLP21_weights.02-0.0011.keras', 'PRSA_data_vola28_LSTM14_weights.03-0.0007.keras', 'PRSA_data_vola21_LSTM_weights.09-0.0015.keras', 'PRSA_data_vola21_LSTM7_weights.09-0.0012.keras', 'PRSA_data_vola28_MLP21_weights.65-0.0007.keras', 'PRSA_data_vola21_MLP28_weights.15-0.0011.keras', 'PRSA_data_at_LSTM28_weights.04-0.7851.keras', 'PRSA_data_at_MLP7_weights.42-0.8694.keras', 'PRSA_data_vola21_LSTM28_weights.18-0.0008.keras', 
'PRSA_data_vola21_MLP14_weights.04-0.0008.keras', 'PRSA_data_vola7_LSTM14_weights.02-0.0040.keras', 'PRSA_data_vola21_LSTM14_weights.12-0.0007.keras', 'PRSA_data_vola7_LSTM21_weights.15-0.0029.keras', 'PRSA_data_vola21_MLP28_weights.01-0.0026.keras', 'PRSA_data_price_LSTM_weights.13-67075.9453.keras', 'PRSA_data_vola14_MLP7_weights.01-0.0041.keras', 'PRSA_data_price_MLP21_weights.02-1476.1034.keras', 'PRSA_data_vola14_LSTM28_weights.19-0.0014.keras', 'PRSA_data_price_MLP14_weights.96-663.0834.keras', 'PRSA_data_vola7_MLP21_weights.94-0.0024.keras', 'PRSA_data_price_LSTM21_weights.03-43041.9570.keras', 'PRSA_data_price_MLP7_weights.12-3798.9531.keras', 'PRSA_data_price_LSTM28_weights.10-42813.0352.keras', 'PRSA_data_vola14_MLP28_weights.20-0.0014.keras', 'PRSA_data_vola7_MLP21_weights.04-0.0027.keras', 'PRSA_data_vola7_LSTM28_weights.18-0.0035.keras', 'PRSA_data_at_MLP7_weights.10-2.7632.keras', 'PRSA_data_at_LSTM21_weights.18-0.3561.keras', 'PRSA_data_price_LSTM_weights.06-67145.4141.keras', 'PRSA_data_price_LSTM28_weights.13-42783.9805.keras', 'PRSA_data_vola21_MLP7_weights.13-0.0012.keras', 'PRSA_data_price_LSTM28_weights.01-42902.4570.keras', 'PRSA_data_price_LSTM_weights.20-67005.9609.keras', 'PRSA_data_vola7_LSTM21_weights.04-0.0034.keras', 'PRSA_data_at_MLP14_weights.03-0.0461.keras', 'PRSA_data_at_LSTM21_weights.07-0.5113.keras', 'PRSA_data_vola28_MLP28_weights.01-0.0042.keras', 'PRSA_data_vola7_LSTM14_weights.01-0.0455.keras', 'PRSA_data_price_MLP28_weights.17-1182.9105.keras', 'PRSA_data_vola7_MLP14_weights.05-0.0018.keras', 'PRSA_data_vola28_MLP14_weights.38-0.0006.keras', 'PRSA_data_vola28_MLP7_weights.10-0.0013.keras', 'PRSA_data_vola7_LSTM21_weights.01-0.0038.keras', 'PRSA_data_price_LSTM14_weights.17-51286.4570.keras', 'PRSA_data_price_LSTM28_weights.16-42754.8008.keras', 'PRSA_data_price_LSTM14_weights.11-51345.4023.keras', 'PRSA_data_price_MLP14_weights.22-638.4409.keras', 'PRSA_data_vola7_MLP28_weights.32-0.0029.keras', 
'PRSA_data_price_MLP7_weights.48-1648.1864.keras', 'PRSA_data_vola28_LSTM28_weights.10-0.0007.keras', 'PRSA_data_vola21_LSTM_weights.03-0.0021.keras', 'PRSA_data_vola21_LSTM21_weights.13-0.0010.keras', 'PRSA_data_at_MLP7_weights.07-4.2627.keras', 'PRSA_data_vola7_LSTM21_weights.17-0.0029.keras', 'PRSA_data_vola21_LSTM28_weights.08-0.0011.keras', 'PRSA_data_price_MLP21_weights.05-1072.5081.keras', 'PRSA_data_at_LSTM21_weights.06-0.6219.keras', 'PRSA_data_vola7_LSTM21_weights.05-0.0030.keras', 'PRSA_data_price_MLP14_weights.29-856.8320.keras', 'PRSA_data_vola14_LSTM21_weights.11-0.0011.keras', 'PRSA_data_price_LSTM21_weights.10-42972.4219.keras', 'PRSA_data_at_LSTM21_weights.19-0.2744.keras', 'PRSA_data_price_LSTM_weights.02-67185.9453.keras', 'PRSA_data_vola28_MLP14_weights.21-0.0007.keras', 'PRSA_data_vola21_LSTM28_weights.16-0.0009.keras', 'PRSA_data_vola28_LSTM21_weights.10-0.0009.keras', 'PRSA_data_price_LSTM28_weights.15-42764.5898.keras', 'PRSA_data_at_LSTM14_weights.07-0.7059.keras', 'PRSA_data_vola21_LSTM21_weights.01-0.0048.keras', 'PRSA_data_at_LSTM14_weights.03-1.3422.keras', 'PRSA_data_vola14_LSTM14_weights.02-0.0015.keras', 'PRSA_data_vola14_MLP14_weights.02-0.0007.keras', 'PRSA_data_vola7_LSTM21_weights.02-0.0034.keras', 'PRSA_data_vola7_MLP21_weights.21-0.0025.keras', 'PRSA_data_at_MLP7_weights.18-1.8396.keras', 'PRSA_data_price_LSTM_weights.11-67095.6172.keras', 'PRSA_data_at_LSTM14_weights.12-0.6378.keras', 'PRSA_data_vola21_LSTM21_weights.07-0.0012.keras', 'PRSA_data_vola7_MLP28_weights.45-0.0030.keras', 'PRSA_data_at_MLP7_weights.26-0.8897.keras', 'PRSA_data_vola21_MLP7_weights.01-0.0014.keras', 'PRSA_data_price_MLP14_weights.19-982.5739.keras', 'PRSA_data_vola14_LSTM_weights.16-0.0017.keras', 'PRSA_data_price_MLP21_weights.94-813.8954.keras', 'PRSA_data_vola7_LSTM21_weights.20-0.0029.keras', 'PRSA_data_at_LSTM_weights.03-1.4842.keras', 'PRSA_data_vola21_MLP28_weights.02-0.0015.keras', 'PRSA_data_price_LSTM21_weights.12-42952.9453.keras', 
'PRSA_data_price_MLP28_weights.17-1019.2816.keras', 'PRSA_data_vola14_LSTM_weights.13-0.0019.keras', 'PRSA_data_vola7_LSTM_weights.02-0.0038.keras', 'PRSA_data_vola7_LSTM28_weights.02-0.0042.keras', 'PRSA_data_at_LSTM28_weights.12-0.0705.keras', 'PRSA_data_at_LSTM14_weights.13-0.2270.keras', 'PRSA_data_price_LSTM_weights.18-67026.2891.keras', 'PRSA_data_vola28_MLP21_weights.66-0.0007.keras', 'PRSA_data_at_MLP14_weights.42-0.0373.keras', 'PRSA_data_vola7_MLP14_weights.38-0.0020.keras', 'PRSA_data_vola7_LSTM21_weights.11-0.0029.keras', 'PRSA_data_vola21_LSTM21_weights.12-0.0010.keras', 'PRSA_data_at_LSTM_weights.14-0.5158.keras', 'PRSA_data_vola14_LSTM21_weights.01-0.0019.keras', 'PRSA_data_vola28_LSTM21_weights.11-0.0009.keras', 'PRSA_data_vola7_LSTM28_weights.05-0.0038.keras', 'PRSA_data_vola14_MLP14_weights.10-0.0005.keras', 'PRSA_data_at_MLP21_weights.03-2.1540.keras', 'PRSA_data_at_LSTM_weights.16-0.5377.keras', 'PRSA_data_vola21_LSTM7_weights.13-0.0013.keras', 'PRSA_data_vola21_MLP28_weights.44-0.0008.keras', 'PRSA_data_at_MLP7_weights.16-1.9941.keras', 'PRSA_data_vola28_LSTM28_weights.01-0.0010.keras', 'PRSA_data_vola28_LSTM14_weights.19-0.0006.keras', 'PRSA_data_price_LSTM21_weights.14-42933.4180.keras', 'PRSA_data_vola14_LSTM28_weights.04-0.0019.keras', 'PRSA_data_price_MLP21_weights.82-828.7976.keras', 'PRSA_data_vola28_MLP21_weights.01-0.0030.keras', 'PRSA_data_price_LSTM14_weights.10-51355.2070.keras', 'PRSA_data_vola21_MLP21_weights.18-0.0010.keras', 'PRSA_data_vola28_MLP21_weights.73-0.0007.keras', 'PRSA_data_vola28_MLP7_weights.20-0.0011.keras', 'PRSA_data_price_LSTM21_weights.13-42943.1719.keras', 'PRSA_data_vola28_MLP28_weights.36-0.0007.keras', 'PRSA_data_vola7_MLP14_weights.04-0.0025.keras', 'PRSA_data_at_MLP14_weights.02-0.2715.keras', 'PRSA_data_price_LSTM14_weights.05-51404.8945.keras', 'PRSA_data_price_LSTM28_weights.19-42725.4297.keras', 'PRSA_data_price_MLP28_weights.01-1561.6656.keras', 'PRSA_data_vola21_MLP7_weights.14-0.0011.keras', 
'PRSA_data_vola7_MLP7_weights.23-0.0033.keras', 'PRSA_data_vola14_MLP28_weights.01-0.0026.keras', 'PRSA_data_at_MLP14_weights.29-0.0323.keras', 'PRSA_data_vola7_MLP21_weights.01-0.0033.keras', 'PRSA_data_vola21_LSTM_weights.19-0.0014.keras', 'PRSA_data_price_MLP7_weights.03-5953.8076.keras', 'PRSA_data_price_MLP21_weights.42-948.6628.keras', 'PRSA_data_vola21_LSTM14_weights.07-0.0008.keras', 'PRSA_data_vola14_LSTM28_weights.05-0.0018.keras', 'PRSA_data_vola7_LSTM28_weights.13-0.0037.keras', 'PRSA_data_vola21_MLP21_weights.01-0.0016.keras', 'PRSA_data_vola14_LSTM21_weights.08-0.0011.keras', 'PRSA_data_price_LSTM21_weights.06-43011.9570.keras', 'PRSA_data_vola7_MLP14_weights.05-0.0019.keras', 'PRSA_data_vola7_MLP28_weights.16-0.0033.keras', 'PRSA_data_vola21_LSTM28_weights.16-0.0008.keras', 'PRSA_data_vola28_MLP14_weights.01-0.0031.keras', 'PRSA_data_vola14_MLP28_weights.11-0.0015.keras', 'PRSA_data_price_LSTM14_weights.03-51425.0000.keras', 'PRSA_data_vola7_LSTM28_weights.04-0.0039.keras', 'PRSA_data_vola28_LSTM7_weights.13-0.0011.keras', 'PRSA_data_price_LSTM14_weights.01-51445.4883.keras', 'PRSA_data_vola28_MLP28_weights.22-0.0006.keras', 'PRSA_data_price_LSTM_weights.09-67115.3359.keras', 'PRSA_data_vola28_LSTM7_weights.14-0.0011.keras', 'PRSA_data_vola7_LSTM14_weights.04-0.0050.keras', 'PRSA_data_vola21_MLP28_weights.17-0.0010.keras', 'PRSA_data_vola28_LSTM21_weights.09-0.0010.keras', 'PRSA_data_vola21_MLP14_weights.13-0.0010.keras', 'PRSA_data_vola14_MLP14_weights.01-0.0013.keras', 'PRSA_data_vola21_MLP28_weights.50-0.0009.keras', 'PRSA_data_at_LSTM_weights.19-0.2184.keras', 'PRSA_data_vola14_LSTM_weights.02-0.0030.keras', 'PRSA_data_vola7_MLP28_weights.39-0.0030.keras', 'PRSA_data_price_LSTM21_weights.04-43031.9414.keras', 'PRSA_data_price_LSTM_weights.15-67056.2109.keras', 'PRSA_data_at_MLP7_weights.20-1.6814.keras', 'PRSA_data_vola28_LSTM7_weights.18-0.0010.keras', 'PRSA_data_at_MLP7_weights.95-0.2130.keras', 
'PRSA_data_vola14_LSTM28_weights.03-0.0021.keras', 'PRSA_data_at_LSTM_weights.01-7.5653.keras', 'PRSA_data_at_MLP7_weights.05-7.2110.keras', 'PRSA_data_vola21_LSTM28_weights.20-0.0008.keras', 'PRSA_data_at_LSTM_weights.07-0.8569.keras', 'PRSA_data_vola14_LSTM28_weights.12-0.0014.keras', 'PRSA_data_price_LSTM21_weights.16-42913.7969.keras', 'PRSA_data_at_MLP28_weights.04-1.3772.keras', 'PRSA_data_price_LSTM_weights.08-67125.3047.keras', 'PRSA_data_price_LSTM14_weights.07-51384.8633.keras', 'PRSA_data_price_LSTM_weights.04-67165.6016.keras', 'PRSA_data_vola14_LSTM28_weights.15-0.0014.keras', 'PRSA_data_at_MLP14_weights.01-0.4195.keras', 'PRSA_data_price_LSTM28_weights.09-42822.7578.keras', 'PRSA_data_price_LSTM28_weights.03-42882.1953.keras', 'PRSA_data_at_LSTM14_weights.05-0.8408.keras', 'PRSA_data_vola7_MLP28_weights.08-0.0030.keras', 'PRSA_data_price_MLP14_weights.07-3004.8474.keras', 'PRSA_data_price_LSTM21_weights.15-42923.6367.keras', 'PRSA_data_vola7_MLP21_weights.81-0.0025.keras', 'PRSA_data_price_MLP21_weights.86-819.7106.keras', 'PRSA_data_vola28_MLP7_weights.10-0.0010.keras', 'PRSA_data_vola21_MLP28_weights.05-0.0012.keras', 'PRSA_data_vola21_MLP14_weights.20-0.0007.keras', 'PRSA_data_vola14_MLP28_weights.42-0.0011.keras', 'PRSA_data_price_LSTM_weights.12-67085.7578.keras', 'PRSA_data_vola28_MLP21_weights.13-0.0008.keras', 'PRSA_data_vola14_MLP28_weights.08-0.0016.keras', 'PRSA_data_at_LSTM28_weights.05-0.5407.keras', 'PRSA_data_at_MLP7_weights.86-0.0883.keras', 'PRSA_data_vola28_LSTM14_weights.01-0.0020.keras', 'PRSA_data_vola21_MLP21_weights.71-0.0010.keras', 'PRSA_data_at_MLP28_weights.09-0.1537.keras', 'PRSA_data_vola7_MLP28_weights.48-0.0030.keras', 'PRSA_data_at_MLP28_weights.12-0.0354.keras', 'PRSA_data_at_LSTM14_weights.04-0.9732.keras', 'PRSA_data_at_LSTM14_weights.15-0.2154.keras', 'PRSA_data_vola14_MLP28_weights.21-0.0013.keras', 'PRSA_data_vola21_MLP14_weights.25-0.0009.keras', 'PRSA_data_vola14_LSTM14_weights.01-0.0027.keras', 
'PRSA_data_at_LSTM21_weights.20-0.0855.keras', 'PRSA_data_vola21_LSTM28_weights.01-0.0039.keras', 'PRSA_data_price_MLP14_weights.02-8059.2510.keras', 'PRSA_data_vola21_MLP7_weights.13-0.0013.keras', 'PRSA_data_vola28_MLP7_weights.04-0.0016.keras', 'PRSA_data_vola21_LSTM21_weights.02-0.0043.keras', 'PRSA_data_price_MLP14_weights.78-683.9230.keras', 'PRSA_data_price_MLP21_weights.19-1002.5925.keras', 'PRSA_data_vola28_MLP21_weights.92-0.0007.keras', 'PRSA_data_price_LSTM28_weights.14-42774.2500.keras', 'PRSA_data_vola21_MLP21_weights.06-0.0010.keras', 'PRSA_data_vola14_LSTM14_weights.03-0.0010.keras', 'PRSA_data_price_LSTM_weights.16-67046.2422.keras', 'PRSA_data_vola7_LSTM28_weights.07-0.0037.keras', 'PRSA_data_vola28_LSTM7_weights.01-0.0017.keras', 'PRSA_data_vola28_LSTM14_weights.02-0.0008.keras', 'PRSA_data_vola7_MLP14_weights.05-0.0022.keras', 'PRSA_data_at_MLP21_weights.03-0.2039.keras', 'PRSA_data_vola14_LSTM28_weights.02-0.0023.keras', 'PRSA_data_at_MLP7_weights.03-13.2212.keras', 'PRSA_data_vola7_LSTM14_weights.04-0.0037.keras', 'PRSA_data_vola14_MLP21_weights.69-0.0010.keras', 'PRSA_data_vola21_LSTM7_weights.18-0.0013.keras', 'PRSA_data_vola7_MLP21_weights.49-0.0025.keras', 'PRSA_data_vola28_MLP28_weights.31-0.0006.keras', 'PRSA_data_price_MLP21_weights.77-841.5739.keras', 'PRSA_data_vola28_MLP14_weights.35-0.0006.keras', 'PRSA_data_vola7_MLP28_weights.26-0.0031.keras', 'PRSA_data_vola7_LSTM14_weights.01-0.0065.keras', 'PRSA_data_price_LSTM14_weights.02-51435.1133.keras', 'PRSA_data_price_LSTM28_weights.20-42714.7695.keras', 'PRSA_data_vola28_LSTM28_weights.16-0.0006.keras', 'PRSA_data_vola14_LSTM28_weights.06-0.0016.keras', 'PRSA_data_vola21_MLP21_weights.13-0.0010.keras', 'PRSA_data_vola28_MLP14_weights.32-0.0006.keras', 'PRSA_data_vola7_MLP28_weights.01-0.0043.keras', 'PRSA_data_vola21_MLP28_weights.08-0.0011.keras', 'PRSA_data_vola21_LSTM28_weights.10-0.0011.keras', 'PRSA_data_vola21_LSTM7_weights.14-0.0012.keras', 
'PRSA_data_vola7_LSTM28_weights.01-0.0049.keras', 'PRSA_data_price_LSTM28_weights.20-42715.6250.keras', 'PRSA_data_vola21_LSTM7_weights.13-0.0012.keras', 'PRSA_data_price_LSTM_weights.05-67155.5078.keras', 'PRSA_data_price_MLP14_weights.14-1073.6493.keras', 'PRSA_data_vola14_LSTM_weights.03-0.0029.keras', 'PRSA_data_vola14_MLP7_weights.07-0.0021.keras', 'PRSA_data_at_MLP28_weights.03-0.0637.keras', 'PRSA_data_vola21_MLP28_weights.03-0.0012.keras', 'PRSA_data_at_MLP28_weights.05-0.1823.keras', 'PRSA_data_at_MLP21_weights.01-0.9762.keras', 'PRSA_data_vola7_LSTM28_weights.15-0.0035.keras', 'PRSA_data_price_LSTM21_weights.08-42992.0703.keras', 'PRSA_data_vola21_MLP21_weights.22-0.0010.keras', 'PRSA_data_at_LSTM28_weights.01-6.0769.keras', 'PRSA_data_vola28_LSTM28_weights.20-0.0006.keras', 'PRSA_data_at_MLP21_weights.01-4.3241.keras', 'PRSA_data_at_LSTM_weights.06-0.8671.keras', 'PRSA_data_price_MLP7_weights.37-1654.4709.keras', 'PRSA_data_at_LSTM_weights.20-0.4626.keras', 'PRSA_data_vola21_MLP28_weights.27-0.0009.keras', 'PRSA_data_at_LSTM_weights.19-0.4729.keras', 'PRSA_data_at_MLP7_weights.24-1.4718.keras', 'PRSA_data_at_MLP7_weights.59-0.4636.keras', 'PRSA_data_vola14_LSTM14_weights.06-0.0005.keras', 'PRSA_data_at_MLP7_weights.14-2.2322.keras', 'PRSA_data_vola7_LSTM14_weights.02-0.0196.keras', 'PRSA_data_at_LSTM14_weights.19-0.3356.keras', 'PRSA_data_price_LSTM28_weights.18-42735.2305.keras', 'PRSA_data_at_LSTM21_weights.04-0.8083.keras', 'PRSA_data_price_LSTM14_weights.12-51335.6523.keras', 'PRSA_data_vola21_LSTM_weights.06-0.0017.keras', 'PRSA_data_price_LSTM28_weights.05-42862.2383.keras', 'PRSA_data_price_MLP7_weights.10-1248.9592.keras', 'PRSA_data_vola21_MLP28_weights.35-0.0009.keras', 'PRSA_data_vola28_LSTM21_weights.03-0.0013.keras', 'PRSA_data_at_LSTM21_weights.03-1.0323.keras', 'PRSA_data_price_MLP14_weights.04-5028.7607.keras', 'PRSA_data_price_LSTM14_weights.08-51374.8945.keras', 'PRSA_data_vola28_MLP14_weights.48-0.0007.keras']
# Generate predictions with the best loaded checkpoint on the three splits.
if best_model14 is not None:
    train_preds_price_MLP14 = best_model14.predict(X_train_price14)
    val_preds_price_MLP14 = best_model14.predict(X_val_price14)
    test_preds_price_MLP14 = best_model14.predict(X_test_price14)
    # Drop singleton dimensions (predictions come back as column vectors)
    # so downstream metrics and plotting helpers receive flat 1-D arrays.
    train_preds_price_MLP14 = np.squeeze(train_preds_price_MLP14)
    val_preds_price_MLP14 = np.squeeze(val_preds_price_MLP14)
    test_preds_price_MLP14 = np.squeeze(test_preds_price_MLP14)
    # Show the predictions for each split (notebook cell output).
    print("Predicciones de Entrenamiento:", train_preds_price_MLP14)
    print("Predicciones de validación:", val_preds_price_MLP14)
    print("Predicciones de prueba:", test_preds_price_MLP14)
else:
    print("No se puede realizar predicciones porque el mejor modelo no se ha cargado.")
1/155 [..............................] - ETA: 2s
155/155 [==============================] - 0s 165us/step
1/1 [==============================] - ETA: 0s
1/1 [==============================] - 0s 5ms/step
1/1 [==============================] - ETA: 0s
1/1 [==============================] - 0s 5ms/step
Predicciones de Entrenamiento: [9.0316725e-01 9.0316725e-01 9.0316725e-01 ... 4.4856195e+04 4.6219316e+04
4.7600551e+04]
Predicciones de validación: [48279.617 49634.098 50278.44 51306.207 52084.176 52479.434 52224.63
52551.027 52494.598 52203.816 52493.336 51601.45 51266.312 51471.566]
Predicciones de prueba: [51691.61 53216.31 55726.504 59792.12 61614.906 62616.48 63433.277
63773.684 66592.45 65789.625 65850.83 66823.67 68625.36 68518.266]
A continuación se indexan los parámetros sobre la función data_plot() para un horizonte de 14 días (\(\tau = 14\)).
data_train_plot_price14, data_val_plot_price14, data_test_plot_price14 = data_plot(df_1_st['Price'], 14)
Datos de entrenamiento:
4998 0.1
4997 0.1
4996 0.1
4995 0.1
4994 0.1
...
32 51858.2
31 51320.4
30 50740.5
29 51571.6
28 51722.7
Name: Price, Length: 4971, dtype: float64
Datos de validación:
27 54495.1
26 57056.2
25 62467.6
24 61169.3
23 62397.7
22 61994.5
21 63135.8
20 68270.1
19 63792.6
18 66080.4
17 66855.3
16 68172.0
15 68366.5
14 68964.8
Name: Price, dtype: float64
Datos de prueba:
13 72099.1
12 71470.2
11 73066.3
10 71387.5
9 69463.7
8 65314.2
7 68391.2
6 67594.1
5 62050.0
4 67854.0
3 65503.8
2 63785.5
1 64037.8
0 67211.9
Name: Price, dtype: float64
plot_model(data_train_plot_price14[-100:], data_val_plot_price14, data_test_plot_price14, val_preds_price_MLP14, test_preds_price_MLP14, "Predicciones usando Perceptrón Multicapa (MLP) para un horizonte de 14 días")
A continuación realizamos un análisis sobre los residuales y el rendimiento de nuestro modelo con relación a nuestro conjunto de validación (rendimiento) y de prueba (rendimiento y residuales).
Conjunto de datos de Entrenamiento
A continuación se realiza el análisis de los residuales para el conjunto de entrenamiento.
ljung_box_pval_MLP_train14, jarque_bera_pval_MLP_train14 = diagnostic_plots(y_train_price14, train_preds_price_MLP14)
Ljung-Box LB Statistic: 717.340325
Ljung-Box p-value: 0.000000
Se rechaza H0: hay autocorrelación en los residuales.
Jarque-Bera p-value: 0.000000
Se rechaza H0: los residuales no siguen una distribución normal.
# Compute the fit metrics (SSE, MAPE, MAD, MSD, R2) on the training split
# using the project helper `metricas`.
metrica_price_MLP_train14 = metricas(y_train_price14,train_preds_price_MLP14)
# Rename the single row so the summary table is self-describing.
metrica_price_MLP_train14.index = metrica_price_MLP_train14.index.map({0: 'MLP Entrenamiento Price τ = 14'})
# Attach the residual-diagnostic p-values computed in the previous cell.
metrica_price_MLP_train14['Ljung-Box p-value'] = pd.Series([ljung_box_pval_MLP_train14], index=metrica_price_MLP_train14.index)
metrica_price_MLP_train14['Jarque-Bera p-value'] = pd.Series([jarque_bera_pval_MLP_train14], index=metrica_price_MLP_train14.index)
# Display the resulting metrics table (notebook cell output).
metrica_price_MLP_train14
| SSE | MAPE | MAD | MSD | R2 | Ljung-Box p-value | Jarque-Bera p-value | |
|---|---|---|---|---|---|---|---|
| MLP Entrenamiento Price τ = 14 | 2.6896e+09 | 24.8% | 302.87 | 544131.26 | 99.77% | 5.0699e-158 | 0.0 |
Conjunto de datos de Prueba:
ljung_box_pvalMLP14, jarque_bera_pvalMLP14 = evaluate_residuals(data_test_plot_price14, test_preds_price_MLP14)
Se rechaza H0: hay autocorrelación en los residuales.
No se rechaza H0: los residuales siguen una distribución normal.
# Compute the fit metrics on the test split using the project helper `metricas`.
metrica_MLP14_test = metricas(y_test_price14,test_preds_price_MLP14)
# Rename the single row so the summary table is self-describing.
metrica_MLP14_test.index = metrica_MLP14_test.index.map({0: 'MLP Prueba Price τ = 14'})
# Attach the residual-diagnostic p-values computed in the previous cell.
metrica_MLP14_test['Ljung-Box p-value'] = pd.Series([ljung_box_pvalMLP14], index=metrica_MLP14_test.index)
metrica_MLP14_test['Jarque-Bera p-value'] = pd.Series([jarque_bera_pvalMLP14], index=metrica_MLP14_test.index)
# Display the resulting metrics table (notebook cell output).
metrica_MLP14_test
| SSE | MAPE | MAD | MSD | R2 | Ljung-Box p-value | Jarque-Bera p-value | |
|---|---|---|---|---|---|---|---|
| MLP Prueba Price τ = 14 | 1.0227e+08 | 3.11% | 1936.37 | 7.3047e+06 | 58.69% | 0.0022 | 0.5157 |
Curva Runs vs Error/Score :
plot_best_model_validation_loss(history_price_MPL14)
Boxplot Errores Conjunto de Entrenamiento, Validación y Prueba :
errores_plots(y_train_price14, train_preds_price_MLP14, y_val_price14, val_preds_price_MLP14, y_test_price14, test_preds_price_MLP14)
De acuerdo con los resultados obtenidos del gráfico de Run vs Error/Score y al compararlo con el gráfico boxplot de los residuales se observa lo siguiente:
Conjunto de Entrenamiento: La media de los residuales se encuentra alejada considerablemente del error/score correspondiente al val_loss de la época del mejor modelo.
Conjunto de Validación: La media de los residuales se encuentra alejada considerablemente del error/score correspondiente al val_loss de la época del mejor modelo.
Conjunto de Prueba: La media de los residuales se encuentra alejada considerablemente del error/score correspondiente al val_loss de la época del mejor modelo. De igual forma se observa una alta variabilidad en los residuales.
Lo anterior es coherente con las métricas obtenidas para el modelo implementado.
Horizonte de 21 días (\(\tau=21\))#
A continuación, se lleva a cabo la búsqueda de los hiperparámetros que optimizan nuestro modelo utilizando Keras y GridSearch.
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # Esto ignorará todos los logs de info y warnings.
def create_mlp_model(activation='tanh',learning_rate=0.001):
    """Build and compile the MLP used for the 21-day price horizon.

    Architecture: 21-feature input -> Dense(32) -> Dense(16) -> Dense(16)
    -> Dropout(0.2) -> linear Dense(1) output, trained with MAE loss and
    the (legacy) Adam optimizer at the given learning rate.

    Parameters
    ----------
    activation : str
        Activation function for the hidden Dense layers.
    learning_rate : float
        Step size for the Adam optimizer.

    Returns
    -------
    A compiled Keras Model.
    """
    inputs = Input(shape=(21,), dtype='float32')  # one feature per lag day
    hidden = Dense(32, activation=activation)(inputs)
    hidden = Dense(16, activation=activation)(hidden)
    hidden = Dense(16, activation=activation)(hidden)
    regularized = Dropout(0.2)(hidden)  # dropout for regularization
    outputs = Dense(1, activation='linear')(regularized)  # scalar price prediction
    mlp = Model(inputs=inputs, outputs=outputs)
    mlp.compile(
        loss='mean_absolute_error',
        optimizer=tf.keras.optimizers.legacy.Adam(learning_rate=learning_rate),
    )
    return mlp
# --- Grid Search configuration ---
model = KerasRegressor(build_fn=create_mlp_model, epochs=20, batch_size=16, verbose=0)

# Candidate hyperparameters. Full grids kept for reference:
#   activation:    ['relu', 'tanh', 'sigmoid']
#   epochs:        [20, 50, 100, 200, 300]
#   learning_rate: [0.001, 0.01, 0.1, 0.2, 0.3]
param_grid = {
    'activation': ['relu'],
    'epochs': [20],
    'learning_rate': [0.001],
}

cv_scheme = KFold(n_splits=5, shuffle=True)
grid = GridSearchCV(estimator=model, param_grid=param_grid, n_jobs=-1, cv=cv_scheme, verbose=2)

# Fit the grid on the 21-day-horizon training data.
grid_result = grid.fit(X_train_price21, y_train_price21)

# Report the winning configuration.
print(f"Mejor función de activación: {grid_result.best_params_['activation']}")
print(f"Mejor número de epocas: {grid_result.best_params_['epochs']}")
print(f"Mejor Longitud de paso μ (Learning Rate): {grid_result.best_params_['learning_rate']}")
print(f"Mejor Puntuación: {grid_result.best_score_}")
Fitting 5 folds for each of 1 candidates, totalling 5 fits
Mejor función de activación: relu
Mejor número de epocas: 20
Mejor Longitud de paso μ (Learning Rate): 0.001
Mejor Puntuación: -636.5871337890625
A continuación, configuramos la red MLP empleando la API funcional de Keras. Con este método, una capa puede ser designada como la entrada para la siguiente capa en el momento de su definición.
En este contexto, Input es una función utilizada para establecer una capa de entrada en un modelo de red neuronal. La propiedad shape=(21,) define la estructura de los datos de entrada, lo que indica que estos tendrán 21 dimensiones. Por otro lado, dtype='float32' especifica que los elementos de esta capa serán números de punto flotante de 32 bits.
A continuación se define la función build_models_mlp para la generación de modelos de acuerdo con los parámetros indexados a través de la lista.
La indexación de parámetros de la función build_models_mlp se realiza con base en lo indicado en el numeral 3 del parcial práctico.
# Hyperparameter grid for the MLP(τ=21) model family.
input_shape21 = 21  # number of lagged inputs (21-day horizon)
neurons_list = [10, 100, 1000, 10000]
dropout_rates = [0.2, 0.4, 0.6, 0.8]
# NOTE(review): every summary printed below reports the same 1,521 parameters
# (Dense 32/16/16/1) regardless of the neuron count, so build_models_mlp
# appears to ignore neurons_list — confirm against its definition.
models_MLP21 = build_models_mlp(input_shape21, neurons_list, dropout_rates, 'relu')
Modelo con 10 neuronas y dropout 0.2:
Model: "model_35"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_36 (InputLayer) [(None, 21)] 0
dense_140 (Dense) (None, 32) 704
dense_141 (Dense) (None, 16) 528
dense_142 (Dense) (None, 16) 272
dropout_35 (Dropout) (None, 16) 0
dense_143 (Dense) (None, 1) 17
=================================================================
Total params: 1,521
Trainable params: 1,521
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10 neuronas y dropout 0.4:
Model: "model_36"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_37 (InputLayer) [(None, 21)] 0
dense_144 (Dense) (None, 32) 704
dense_145 (Dense) (None, 16) 528
dense_146 (Dense) (None, 16) 272
dropout_36 (Dropout) (None, 16) 0
dense_147 (Dense) (None, 1) 17
=================================================================
Total params: 1,521
Trainable params: 1,521
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10 neuronas y dropout 0.6:
Model: "model_37"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_38 (InputLayer) [(None, 21)] 0
dense_148 (Dense) (None, 32) 704
dense_149 (Dense) (None, 16) 528
dense_150 (Dense) (None, 16) 272
dropout_37 (Dropout) (None, 16) 0
dense_151 (Dense) (None, 1) 17
=================================================================
Total params: 1,521
Trainable params: 1,521
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10 neuronas y dropout 0.8:
Model: "model_38"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_39 (InputLayer) [(None, 21)] 0
dense_152 (Dense) (None, 32) 704
dense_153 (Dense) (None, 16) 528
dense_154 (Dense) (None, 16) 272
dropout_38 (Dropout) (None, 16) 0
dense_155 (Dense) (None, 1) 17
=================================================================
Total params: 1,521
Trainable params: 1,521
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 100 neuronas y dropout 0.2:
Model: "model_39"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_40 (InputLayer) [(None, 21)] 0
dense_156 (Dense) (None, 32) 704
dense_157 (Dense) (None, 16) 528
dense_158 (Dense) (None, 16) 272
dropout_39 (Dropout) (None, 16) 0
dense_159 (Dense) (None, 1) 17
=================================================================
Total params: 1,521
Trainable params: 1,521
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 100 neuronas y dropout 0.4:
Model: "model_40"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_41 (InputLayer) [(None, 21)] 0
dense_160 (Dense) (None, 32) 704
dense_161 (Dense) (None, 16) 528
dense_162 (Dense) (None, 16) 272
dropout_40 (Dropout) (None, 16) 0
dense_163 (Dense) (None, 1) 17
=================================================================
Total params: 1,521
Trainable params: 1,521
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 100 neuronas y dropout 0.6:
Model: "model_41"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_42 (InputLayer) [(None, 21)] 0
dense_164 (Dense) (None, 32) 704
dense_165 (Dense) (None, 16) 528
dense_166 (Dense) (None, 16) 272
dropout_41 (Dropout) (None, 16) 0
dense_167 (Dense) (None, 1) 17
=================================================================
Total params: 1,521
Trainable params: 1,521
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 100 neuronas y dropout 0.8:
Model: "model_42"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_43 (InputLayer) [(None, 21)] 0
dense_168 (Dense) (None, 32) 704
dense_169 (Dense) (None, 16) 528
dense_170 (Dense) (None, 16) 272
dropout_42 (Dropout) (None, 16) 0
dense_171 (Dense) (None, 1) 17
=================================================================
Total params: 1,521
Trainable params: 1,521
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 1000 neuronas y dropout 0.2:
Model: "model_43"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_44 (InputLayer) [(None, 21)] 0
dense_172 (Dense) (None, 32) 704
dense_173 (Dense) (None, 16) 528
dense_174 (Dense) (None, 16) 272
dropout_43 (Dropout) (None, 16) 0
dense_175 (Dense) (None, 1) 17
=================================================================
Total params: 1,521
Trainable params: 1,521
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 1000 neuronas y dropout 0.4:
Model: "model_44"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_45 (InputLayer) [(None, 21)] 0
dense_176 (Dense) (None, 32) 704
dense_177 (Dense) (None, 16) 528
dense_178 (Dense) (None, 16) 272
dropout_44 (Dropout) (None, 16) 0
dense_179 (Dense) (None, 1) 17
=================================================================
Total params: 1,521
Trainable params: 1,521
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 1000 neuronas y dropout 0.6:
Model: "model_45"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_46 (InputLayer) [(None, 21)] 0
dense_180 (Dense) (None, 32) 704
dense_181 (Dense) (None, 16) 528
dense_182 (Dense) (None, 16) 272
dropout_45 (Dropout) (None, 16) 0
dense_183 (Dense) (None, 1) 17
=================================================================
Total params: 1,521
Trainable params: 1,521
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 1000 neuronas y dropout 0.8:
Model: "model_46"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_47 (InputLayer) [(None, 21)] 0
dense_184 (Dense) (None, 32) 704
dense_185 (Dense) (None, 16) 528
dense_186 (Dense) (None, 16) 272
dropout_46 (Dropout) (None, 16) 0
dense_187 (Dense) (None, 1) 17
=================================================================
Total params: 1,521
Trainable params: 1,521
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10000 neuronas y dropout 0.2:
Model: "model_47"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_48 (InputLayer) [(None, 21)] 0
dense_188 (Dense) (None, 32) 704
dense_189 (Dense) (None, 16) 528
dense_190 (Dense) (None, 16) 272
dropout_47 (Dropout) (None, 16) 0
dense_191 (Dense) (None, 1) 17
=================================================================
Total params: 1,521
Trainable params: 1,521
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10000 neuronas y dropout 0.4:
Model: "model_48"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_49 (InputLayer) [(None, 21)] 0
dense_192 (Dense) (None, 32) 704
dense_193 (Dense) (None, 16) 528
dense_194 (Dense) (None, 16) 272
dropout_48 (Dropout) (None, 16) 0
dense_195 (Dense) (None, 1) 17
=================================================================
Total params: 1,521
Trainable params: 1,521
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10000 neuronas y dropout 0.6:
Model: "model_49"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_50 (InputLayer) [(None, 21)] 0
dense_196 (Dense) (None, 32) 704
dense_197 (Dense) (None, 16) 528
dense_198 (Dense) (None, 16) 272
dropout_49 (Dropout) (None, 16) 0
dense_199 (Dense) (None, 1) 17
=================================================================
Total params: 1,521
Trainable params: 1,521
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10000 neuronas y dropout 0.8:
Model: "model_50"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_51 (InputLayer) [(None, 21)] 0
dense_200 (Dense) (None, 32) 704
dense_201 (Dense) (None, 16) 528
dense_202 (Dense) (None, 16) 272
dropout_50 (Dropout) (None, 16) 0
dense_203 (Dense) (None, 1) 17
=================================================================
Total params: 1,521
Trainable params: 1,521
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Para entrenar el modelo, se utiliza la función fit() en el objeto del modelo, proporcionándole como argumentos X_train y y_train. El proceso de entrenamiento se lleva a cabo a lo largo de un número específico de épocas. Además, el parámetro batch_size especifica cuántas muestras del conjunto de entrenamiento se usarán en cada iteración de retropropagación (backpropagation).
El conjunto de datos de validación se utiliza para evaluar el rendimiento del modelo al finalizar cada época. Se emplea un objeto ModelCheckpoint que monitorea la función de pérdida en el conjunto de validación y almacena el modelo correspondiente a la época en la que esta función alcanza su valor más bajo.
Se define val_loss como el valor de la función de coste para los datos de validación cruzada y loss como el valor de la función de coste para los datos de entrenamiento. period=1 indica que el modelo se evaluará y potencialmente se guardará después de cada época.
from tensorflow.keras.callbacks import ModelCheckpoint

# Checkpoint path encodes the epoch number and the validation loss so the
# best file can later be identified from its name alone.
save_weights_at = os.path.join('keras_models', 'PRSA_data_price_MLP21_weights.{epoch:02d}-{val_loss:.4f}.keras')

# Keep only the model whose validation loss is the lowest seen so far,
# evaluating once per epoch.
save_best21 = ModelCheckpoint(
    save_weights_at,
    monitor='val_loss',
    verbose=2,
    save_best_only=True,
    save_weights_only=False,
    mode='min',
    save_freq='epoch',
)
A continuación se itera sobre cada uno de los modelos del objeto models_MLP21.
import os
from joblib import dump, load

history_price_MPL21 = []
# Iterate over each model in models_MLP21: train only when no cached
# training history exists on disk, otherwise reuse the saved one.
for i, model in enumerate(models_MLP21):
    filename = f'history_price_MPL21_model_{i}.joblib'
    if os.path.exists(filename):
        model_history = load(filename)  # previously dumped history dict
        # Fix: interpolate the actual filename into the message (the original
        # printed a literal placeholder instead of the file it refers to).
        print(f"El archivo '{filename}' ya existe. Se ha cargado el historial del entrenamiento.")
    else:
        model_history = model.fit(x=X_train_price21, y=y_train_price21, batch_size=16, epochs=20,
                                  verbose=2, callbacks=[save_best21], validation_data=(X_val_price21, y_val_price21),
                                  shuffle=True)
        dump(model_history.history, filename)
        print(f"El entrenamiento del modelo {i + 1} se ha completado y el historial ha sido guardado en '{filename}'.")
    # Cached loads yield a plain dict; fresh fits yield a History object,
    # so normalize to the underlying dict before storing.
    history_price_MPL21.append(model_history if isinstance(model_history, dict) else model_history.history)
El archivo 'history_price_MPL21_model_0.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_price_MPL21_model_1.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_price_MPL21_model_2.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_price_MPL21_model_3.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_price_MPL21_model_4.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_price_MPL21_model_5.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_price_MPL21_model_6.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_price_MPL21_model_7.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_price_MPL21_model_8.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_price_MPL21_model_9.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_price_MPL21_model_10.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_price_MPL21_model_11.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_price_MPL21_model_12.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_price_MPL21_model_13.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_price_MPL21_model_14.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_price_MPL21_model_15.joblib' ya existe. Se ha cargado el historial del entrenamiento.
En este caso, el parámetro verbose=2 indica que se mostrará una línea por cada época durante el entrenamiento. Los valores posibles son: 0 para no mostrar información, 1 para visualizar una barra de progreso y 2 para ver un registro por época.
Además, las muestras se mezclan de manera aleatoria antes de cada época, gracias a la opción shuffle=True.
Una vez completado el entrenamiento, se realizan predicciones sobre el precio del Bitcoin utilizando los mejores modelos guardados. Asimismo, se evalúa la calidad del ajuste mediante el uso de métricas de medición como SSE, MAPE, MAD, MSD y R2.
import os
import re
from tensorflow.keras.models import load_model
import numpy as np

# Find, among the saved checkpoints, the MLP(τ=21) file with the lowest
# validation loss (the loss is encoded in the filename) and load it.
model_dir = 'keras_models'
files = os.listdir(model_dir)
# Filename shape: PRSA_data_price_MLP21_weights.<epoch>-<val_loss>.keras
pattern = r"PRSA_data_price_MLP21_weights\.(\d+)-([\d\.]+)\.keras"
best_val_loss = float('inf')
best_model_file = None
best_model21 = None
# Scan the directory, keeping the file with the smallest encoded val_loss.
# (The epoch group is not needed for the comparison, so it is not extracted.)
for file in files:
    match = re.match(pattern, file)
    if match:
        val_loss = float(match.group(2))  # val_loss embedded in the name
        if val_loss < best_val_loss:
            best_val_loss = val_loss
            best_model_file = file  # current best checkpoint
# Load the winning checkpoint, if any matched the pattern.
if best_model_file:
    best_model_path = os.path.join(model_dir, best_model_file)
    print(f"Cargando el mejor modelo: {best_model_file} con val_loss: {best_val_loss}")
    best_model21 = load_model(best_model_path)
    if best_model21 is None:
        print("Error: No se pudo cargar el mejor modelo.")
else:
    print("No se encontraron archivos de modelos que coincidan con el patrón.")
Cargando el mejor modelo: PRSA_data_price_MLP21_weights.74-805.9489.keras con val_loss: 805.9489
A continuación estimamos las predicciones del modelo MLP para la época en donde la función de pérdida alcanza su valor más bajo.
# Show every file found in the models directory, for manual inspection.
print("Archivos en el directorio 'keras_models':", files)
Archivos en el directorio 'keras_models': ['PRSA_data_vola14_MLP7_weights.04-0.0018.keras', 'PRSA_data_price_MLP21_weights.01-3579.4128.keras', 'PRSA_data_price_LSTM21_weights.01-43062.3047.keras', 'PRSA_data_price_LSTM28_weights.02-42892.2305.keras', 'PRSA_data_price_LSTM_weights.19-67016.3047.keras', 'PRSA_data_at_LSTM28_weights.19-0.0651.keras', 'PRSA_data_vola14_MLP21_weights.20-0.0011.keras', 'PRSA_data_price_LSTM14_weights.06-51394.8750.keras', 'PRSA_data_vola21_LSTM7_weights.04-0.0012.keras', 'PRSA_data_vola7_MLP28_weights.07-0.0033.keras', 'PRSA_data_price_LSTM21_weights.02-43052.0469.keras', 'PRSA_data_at_MLP7_weights.80-0.0780.keras', 'PRSA_data_vola21_LSTM28_weights.02-0.0022.keras', 'PRSA_data_vola21_LSTM14_weights.04-0.0011.keras', 'PRSA_data_vola7_LSTM21_weights.18-0.0029.keras', 'PRSA_data_vola21_LSTM7_weights.08-0.0013.keras', 'PRSA_data_vola28_MLP14_weights.04-0.0013.keras', 'PRSA_data_price_MLP14_weights.52-626.2623.keras', 'PRSA_data_vola7_MLP7_weights.15-0.0041.keras', 'PRSA_data_price_LSTM14_weights.13-51325.8516.keras', 'PRSA_data_vola14_MLP7_weights.02-0.0023.keras', 'PRSA_data_price_LSTM21_weights.11-42962.6562.keras', 'PRSA_data_vola7_LSTM28_weights.10-0.0037.keras', 'PRSA_data_at_LSTM_weights.04-1.2071.keras', 'PRSA_data_price_MLP7_weights.50-1342.5273.keras', 'PRSA_data_price_MLP14_weights.03-5108.7837.keras', 'PRSA_data_vola28_MLP7_weights.14-0.0012.keras', 'PRSA_data_at_MLP7_weights.11-2.4883.keras', 'PRSA_data_price_LSTM28_weights.08-42832.5430.keras', 'PRSA_data_price_MLP28_weights.01-9242.6553.keras', 'PRSA_data_vola21_LSTM28_weights.04-0.0019.keras', 'PRSA_data_price_MLP21_weights.16-1013.2251.keras', 'PRSA_data_price_MLP28_weights.13-1042.5178.keras', 'PRSA_data_vola28_MLP28_weights.34-0.0006.keras', 'PRSA_data_at_MLP7_weights.01-19.6635.keras', 'PRSA_data_at_MLP7_weights.19-1.8379.keras', 'PRSA_data_at_MLP7_weights.12-2.3991.keras', 'PRSA_data_price_MLP7_weights.15-1346.3275.keras', 
'PRSA_data_vola14_MLP28_weights.02-0.0024.keras', 'PRSA_data_at_LSTM21_weights.05-0.7513.keras', 'PRSA_data_vola14_MLP28_weights.07-0.0017.keras', 'PRSA_data_at_MLP28_weights.05-0.0930.keras', 'PRSA_data_at_LSTM28_weights.02-2.0782.keras', 'PRSA_data_price_LSTM14_weights.20-51256.7266.keras', 'PRSA_data_vola21_LSTM21_weights.05-0.0013.keras', 'PRSA_data_price_MLP14_weights.12-1304.7009.keras', 'PRSA_data_vola7_LSTM14_weights.09-0.0044.keras', 'PRSA_data_vola28_MLP28_weights.50-0.0006.keras', 'PRSA_data_vola7_MLP14_weights.15-0.0020.keras', 'PRSA_data_price_MLP14_weights.08-1386.5365.keras', 'PRSA_data_price_LSTM21_weights.09-42982.2383.keras', 'PRSA_data_price_MLP28_weights.11-7366.8994.keras', 'PRSA_data_vola14_MLP7_weights.18-0.0015.keras', 'PRSA_data_price_MLP7_weights.14-1734.7238.keras', 'PRSA_data_at_LSTM28_weights.03-1.3148.keras', 'PRSA_data_price_LSTM14_weights.15-51306.2070.keras', 'PRSA_data_vola21_MLP28_weights.25-0.0010.keras', 'PRSA_data_at_LSTM28_weights.10-0.2820.keras', 'PRSA_data_at_LSTM14_weights.02-2.2175.keras', 'PRSA_data_vola28_LSTM7_weights.08-0.0014.keras', 'PRSA_data_price_MLP28_weights.10-1107.4047.keras', 'PRSA_data_vola28_MLP7_weights.01-0.0068.keras', 'PRSA_data_vola21_LSTM21_weights.03-0.0015.keras', 'PRSA_data_price_MLP7_weights.01-9253.3047.keras', 'PRSA_data_vola21_MLP14_weights.31-0.0007.keras', 'PRSA_data_price_LSTM28_weights.12-42793.6172.keras', 'PRSA_data_price_LSTM14_weights.09-51365.0000.keras', 'PRSA_data_vola28_MLP28_weights.42-0.0006.keras', 'PRSA_data_price_MLP7_weights.05-4792.5054.keras', 'PRSA_data_vola21_LSTM_weights.01-0.0024.keras', 'PRSA_data_price_LSTM28_weights.17-42745.0352.keras', 'PRSA_data_vola28_MLP28_weights.45-0.0006.keras', 'PRSA_data_vola28_MLP14_weights.08-0.0010.keras', 'PRSA_data_at_LSTM_weights.11-0.6498.keras', 'PRSA_data_vola28_MLP28_weights.39-0.0007.keras', 'PRSA_data_at_MLP21_weights.11-0.0326.keras', 'PRSA_data_price_LSTM_weights.20-67006.4141.keras', 
'PRSA_data_vola28_MLP7_weights.06-0.0014.keras', 'PRSA_data_at_LSTM14_weights.06-0.8145.keras', 'PRSA_data_at_MLP28_weights.01-2.1902.keras', 'PRSA_data_price_MLP14_weights.46-814.1992.keras', '.DS_Store', 'PRSA_data_at_LSTM21_weights.01-5.9452.keras', 'PRSA_data_vola28_LSTM28_weights.12-0.0006.keras', 'PRSA_data_price_LSTM14_weights.18-51276.5625.keras', 'PRSA_data_vola21_LSTM14_weights.01-0.0078.keras', 'PRSA_data_vola14_MLP21_weights.04-0.0015.keras', 'PRSA_data_vola21_LSTM7_weights.02-0.0012.keras', 'PRSA_data_vola21_LSTM14_weights.02-0.0011.keras', 'PRSA_data_at_MLP7_weights.09-2.9793.keras', 'PRSA_data_vola21_MLP14_weights.06-0.0007.keras', 'PRSA_data_vola14_MLP21_weights.78-0.0010.keras', 'PRSA_data_price_MLP14_weights.64-711.9107.keras', 'PRSA_data_vola14_MLP28_weights.31-0.0011.keras', 'PRSA_data_vola14_LSTM_weights.08-0.0020.keras', 'PRSA_data_vola14_LSTM21_weights.02-0.0018.keras', 'PRSA_data_at_LSTM21_weights.02-2.0083.keras', 'PRSA_data_vola14_LSTM28_weights.17-0.0014.keras', 'PRSA_data_vola14_MLP14_weights.29-0.0005.keras', 'PRSA_data_price_LSTM_weights.03-67175.7734.keras', 'PRSA_data_vola14_LSTM_weights.01-0.0068.keras', 'PRSA_data_price_LSTM28_weights.11-42803.3086.keras', 'PRSA_data_price_MLP28_weights.15-7107.3306.keras', 'PRSA_data_vola14_MLP21_weights.49-0.0010.keras', 'PRSA_data_vola28_MLP7_weights.07-0.0010.keras', 'PRSA_data_vola21_LSTM7_weights.02-0.0010.keras', 'PRSA_data_at_LSTM14_weights.15-0.4082.keras', 'PRSA_data_vola28_MLP7_weights.19-0.0011.keras', 'PRSA_data_at_LSTM21_weights.19-0.3670.keras', 'PRSA_data_vola14_MLP14_weights.06-0.0006.keras', 'PRSA_data_vola14_LSTM21_weights.06-0.0012.keras', 'PRSA_data_price_LSTM_weights.17-67036.2891.keras', 'PRSA_data_vola28_MLP7_weights.03-0.0018.keras', 'PRSA_data_vola7_MLP28_weights.02-0.0039.keras', 'PRSA_data_vola7_LSTM28_weights.16-0.0035.keras', 'PRSA_data_vola21_MLP7_weights.20-0.0011.keras', 'PRSA_data_vola21_LSTM7_weights.01-0.0018.keras', 
'PRSA_data_vola21_MLP21_weights.33-0.0009.keras', 'PRSA_data_price_MLP21_weights.64-870.7020.keras', 'PRSA_data_vola21_MLP14_weights.22-0.0007.keras', 'PRSA_data_vola7_MLP14_weights.14-0.0020.keras', 'PRSA_data_price_LSTM21_weights.17-42903.9648.keras', 'PRSA_data_vola21_MLP14_weights.01-0.0021.keras', 'PRSA_data_vola21_LSTM7_weights.16-0.0011.keras', 'PRSA_data_at_LSTM21_weights.17-0.5070.keras', 'PRSA_data_at_LSTM14_weights.11-0.6656.keras', 'PRSA_data_vola28_MLP7_weights.02-0.0019.keras', 'PRSA_data_vola14_MLP28_weights.05-0.0018.keras', 'PRSA_data_vola14_LSTM14_weights.08-0.0005.keras', 'PRSA_data_at_MLP21_weights.02-0.8171.keras', 'PRSA_data_price_MLP28_weights.02-8134.2202.keras', 'PRSA_data_price_LSTM28_weights.06-42852.3320.keras', 'PRSA_data_vola7_MLP14_weights.02-0.0036.keras', 'PRSA_data_vola14_LSTM28_weights.01-0.0026.keras', 'PRSA_data_at_MLP7_weights.04-9.8837.keras', 'PRSA_data_price_MLP28_weights.16-1274.4368.keras', 'PRSA_data_price_LSTM21_weights.07-43002.0078.keras', 'PRSA_data_vola7_MLP28_weights.23-0.0032.keras', 'PRSA_data_price_LSTM_weights.01-67196.3359.keras', 'PRSA_data_at_MLP28_weights.02-1.9156.keras', 'PRSA_data_vola14_MLP21_weights.02-0.0016.keras', 'PRSA_data_vola28_MLP14_weights.14-0.0007.keras', 'PRSA_data_price_LSTM28_weights.07-42842.4375.keras', 'PRSA_data_price_MLP14_weights.37-825.1805.keras', 'PRSA_data_vola7_MLP14_weights.07-0.0021.keras', 'PRSA_data_price_LSTM14_weights.04-51414.8945.keras', 'PRSA_data_price_MLP7_weights.42-1446.1339.keras', 'PRSA_data_price_MLP21_weights.57-932.6497.keras', 'PRSA_data_price_LSTM14_weights.19-51266.6133.keras', 'PRSA_data_vola21_MLP14_weights.34-0.0008.keras', 'PRSA_data_price_MLP14_weights.01-10485.8262.keras', 'PRSA_data_vola21_LSTM7_weights.08-0.0012.keras', 'PRSA_data_at_LSTM_weights.02-2.4966.keras', 'PRSA_data_at_LSTM28_weights.08-0.3981.keras', 'PRSA_data_vola21_LSTM28_weights.15-0.0009.keras', 'PRSA_data_price_LSTM_weights.20-67006.3047.keras', 
'PRSA_data_price_LSTM_weights.14-67066.1016.keras', 'PRSA_data_price_LSTM21_weights.19-42884.2422.keras', 'PRSA_data_vola28_LSTM21_weights.18-0.0008.keras', 'PRSA_data_vola21_LSTM7_weights.04-0.0013.keras', 'PRSA_data_price_LSTM28_weights.20-42714.5508.keras', 'PRSA_data_at_LSTM14_weights.13-0.5609.keras', 'PRSA_data_vola28_LSTM28_weights.04-0.0009.keras', 'PRSA_data_vola14_MLP21_weights.01-0.0118.keras', 'PRSA_data_vola7_MLP21_weights.02-0.0032.keras', 'PRSA_data_at_MLP7_weights.74-0.2838.keras', 'PRSA_data_price_MLP28_weights.17-1217.0461.keras', 'PRSA_data_at_MLP21_weights.16-1.1973.keras', 'PRSA_data_vola7_MLP7_weights.02-0.0036.keras', 'PRSA_data_vola21_LSTM7_weights.02-0.0015.keras', 'PRSA_data_vola14_LSTM14_weights.04-0.0006.keras', 'PRSA_data_vola28_LSTM28_weights.18-0.0006.keras', 'PRSA_data_price_MLP21_weights.74-805.9489.keras', 'PRSA_data_vola21_MLP14_weights.02-0.0011.keras', 'PRSA_data_vola28_MLP21_weights.02-0.0012.keras', 'PRSA_data_vola21_LSTM_weights.11-0.0014.keras', 'PRSA_data_vola7_LSTM14_weights.14-0.0036.keras', 'PRSA_data_at_MLP28_weights.03-1.7712.keras', 'PRSA_data_price_LSTM21_weights.20-42874.3555.keras', 'PRSA_data_vola28_LSTM7_weights.20-0.0010.keras', 'PRSA_data_vola28_MLP21_weights.04-0.0009.keras', 'PRSA_data_at_MLP7_weights.08-3.6716.keras', 'PRSA_data_at_MLP21_weights.04-0.0734.keras', 'PRSA_data_vola21_LSTM28_weights.03-0.0021.keras', 'PRSA_data_vola21_MLP14_weights.35-0.0008.keras', 'PRSA_data_vola28_LSTM7_weights.11-0.0012.keras', 'PRSA_data_price_LSTM28_weights.04-42872.1953.keras', 'PRSA_data_at_LSTM_weights.08-0.4803.keras', 'PRSA_data_vola28_MLP14_weights.05-0.0010.keras', 'PRSA_data_price_LSTM14_weights.14-51316.0625.keras', 'PRSA_data_vola7_MLP14_weights.01-0.0101.keras', 'PRSA_data_vola14_LSTM28_weights.17-0.0015.keras', 'PRSA_data_vola7_LSTM_weights.01-0.0040.keras', 'PRSA_data_vola14_MLP21_weights.09-0.0011.keras', 'PRSA_data_price_LSTM21_weights.05-43021.9570.keras', 'PRSA_data_vola28_MLP28_weights.03-0.0007.keras', 
'PRSA_data_vola7_LSTM14_weights.10-0.0042.keras', 'PRSA_data_price_LSTM_weights.07-67135.3359.keras', 'PRSA_data_vola7_MLP14_weights.18-0.0020.keras', 'PRSA_data_price_LSTM21_weights.18-42894.1055.keras', 'PRSA_data_vola7_MLP7_weights.01-0.0042.keras', 'PRSA_data_vola14_LSTM_weights.05-0.0024.keras', 'PRSA_data_vola21_MLP14_weights.27-0.0008.keras', 'PRSA_data_vola28_MLP28_weights.02-0.0018.keras', 'PRSA_data_vola21_LSTM7_weights.03-0.0017.keras', 'PRSA_data_price_LSTM_weights.10-67105.4297.keras', 'PRSA_data_vola28_MLP7_weights.19-0.0010.keras', 'PRSA_data_vola7_MLP21_weights.03-0.0030.keras', 'PRSA_data_at_MLP7_weights.06-5.3724.keras', 'PRSA_data_at_LSTM_weights.05-1.1226.keras', 'PRSA_data_vola21_LSTM7_weights.01-0.0026.keras', 'PRSA_data_vola21_LSTM28_weights.05-0.0018.keras', 'PRSA_data_vola14_MLP21_weights.05-0.0012.keras', 'PRSA_data_vola21_LSTM28_weights.14-0.0010.keras', 'PRSA_data_vola21_MLP14_weights.08-0.0010.keras', 'PRSA_data_vola21_MLP28_weights.47-0.0008.keras', 'PRSA_data_price_LSTM14_weights.16-51296.3633.keras', 'PRSA_data_at_MLP7_weights.02-16.3421.keras', 'PRSA_data_vola7_LSTM21_weights.15-0.0030.keras', 'PRSA_data_vola21_MLP21_weights.09-0.0009.keras', 'PRSA_data_at_LSTM14_weights.01-6.2168.keras', 'PRSA_data_vola28_LSTM21_weights.01-0.0016.keras', 'PRSA_data_vola21_LSTM21_weights.16-0.0009.keras', 'PRSA_data_vola21_LSTM28_weights.07-0.0016.keras', 'PRSA_data_vola21_MLP28_weights.19-0.0010.keras', 'PRSA_data_vola21_LSTM28_weights.14-0.0009.keras', 'PRSA_data_vola14_MLP7_weights.09-0.0018.keras', 'PRSA_data_vola21_MLP21_weights.02-0.0011.keras', 'PRSA_data_vola28_LSTM14_weights.03-0.0007.keras', 'PRSA_data_vola21_LSTM_weights.09-0.0015.keras', 'PRSA_data_vola21_LSTM7_weights.09-0.0012.keras', 'PRSA_data_vola28_MLP21_weights.65-0.0007.keras', 'PRSA_data_vola21_MLP28_weights.15-0.0011.keras', 'PRSA_data_at_LSTM28_weights.04-0.7851.keras', 'PRSA_data_at_MLP7_weights.42-0.8694.keras', 'PRSA_data_vola21_LSTM28_weights.18-0.0008.keras', 
'PRSA_data_vola21_MLP14_weights.04-0.0008.keras', 'PRSA_data_vola7_LSTM14_weights.02-0.0040.keras', 'PRSA_data_vola21_LSTM14_weights.12-0.0007.keras', 'PRSA_data_vola7_LSTM21_weights.15-0.0029.keras', 'PRSA_data_vola21_MLP28_weights.01-0.0026.keras', 'PRSA_data_price_LSTM_weights.13-67075.9453.keras', 'PRSA_data_vola14_MLP7_weights.01-0.0041.keras', 'PRSA_data_price_MLP21_weights.02-1476.1034.keras', 'PRSA_data_vola14_LSTM28_weights.19-0.0014.keras', 'PRSA_data_price_MLP14_weights.96-663.0834.keras', 'PRSA_data_vola7_MLP21_weights.94-0.0024.keras', 'PRSA_data_price_LSTM21_weights.03-43041.9570.keras', 'PRSA_data_price_MLP7_weights.12-3798.9531.keras', 'PRSA_data_price_LSTM28_weights.10-42813.0352.keras', 'PRSA_data_vola14_MLP28_weights.20-0.0014.keras', 'PRSA_data_vola7_MLP21_weights.04-0.0027.keras', 'PRSA_data_vola7_LSTM28_weights.18-0.0035.keras', 'PRSA_data_at_MLP7_weights.10-2.7632.keras', 'PRSA_data_at_LSTM21_weights.18-0.3561.keras', 'PRSA_data_price_LSTM_weights.06-67145.4141.keras', 'PRSA_data_price_LSTM28_weights.13-42783.9805.keras', 'PRSA_data_vola21_MLP7_weights.13-0.0012.keras', 'PRSA_data_price_LSTM28_weights.01-42902.4570.keras', 'PRSA_data_price_LSTM_weights.20-67005.9609.keras', 'PRSA_data_vola7_LSTM21_weights.04-0.0034.keras', 'PRSA_data_at_MLP14_weights.03-0.0461.keras', 'PRSA_data_at_LSTM21_weights.07-0.5113.keras', 'PRSA_data_vola28_MLP28_weights.01-0.0042.keras', 'PRSA_data_vola7_LSTM14_weights.01-0.0455.keras', 'PRSA_data_price_MLP28_weights.17-1182.9105.keras', 'PRSA_data_vola7_MLP14_weights.05-0.0018.keras', 'PRSA_data_vola28_MLP14_weights.38-0.0006.keras', 'PRSA_data_vola28_MLP7_weights.10-0.0013.keras', 'PRSA_data_vola7_LSTM21_weights.01-0.0038.keras', 'PRSA_data_price_LSTM14_weights.17-51286.4570.keras', 'PRSA_data_price_LSTM28_weights.16-42754.8008.keras', 'PRSA_data_price_LSTM14_weights.11-51345.4023.keras', 'PRSA_data_price_MLP14_weights.22-638.4409.keras', 'PRSA_data_vola7_MLP28_weights.32-0.0029.keras', 
'PRSA_data_price_MLP7_weights.48-1648.1864.keras', 'PRSA_data_vola28_LSTM28_weights.10-0.0007.keras', 'PRSA_data_vola21_LSTM_weights.03-0.0021.keras', 'PRSA_data_vola21_LSTM21_weights.13-0.0010.keras', 'PRSA_data_at_MLP7_weights.07-4.2627.keras', 'PRSA_data_vola7_LSTM21_weights.17-0.0029.keras', 'PRSA_data_vola21_LSTM28_weights.08-0.0011.keras', 'PRSA_data_price_MLP21_weights.05-1072.5081.keras', 'PRSA_data_at_LSTM21_weights.06-0.6219.keras', 'PRSA_data_vola7_LSTM21_weights.05-0.0030.keras', 'PRSA_data_price_MLP14_weights.29-856.8320.keras', 'PRSA_data_vola14_LSTM21_weights.11-0.0011.keras', 'PRSA_data_price_LSTM21_weights.10-42972.4219.keras', 'PRSA_data_at_LSTM21_weights.19-0.2744.keras', 'PRSA_data_price_LSTM_weights.02-67185.9453.keras', 'PRSA_data_vola28_MLP14_weights.21-0.0007.keras', 'PRSA_data_vola21_LSTM28_weights.16-0.0009.keras', 'PRSA_data_vola28_LSTM21_weights.10-0.0009.keras', 'PRSA_data_price_LSTM28_weights.15-42764.5898.keras', 'PRSA_data_at_LSTM14_weights.07-0.7059.keras', 'PRSA_data_vola21_LSTM21_weights.01-0.0048.keras', 'PRSA_data_at_LSTM14_weights.03-1.3422.keras', 'PRSA_data_vola14_LSTM14_weights.02-0.0015.keras', 'PRSA_data_vola14_MLP14_weights.02-0.0007.keras', 'PRSA_data_vola7_LSTM21_weights.02-0.0034.keras', 'PRSA_data_vola7_MLP21_weights.21-0.0025.keras', 'PRSA_data_at_MLP7_weights.18-1.8396.keras', 'PRSA_data_price_LSTM_weights.11-67095.6172.keras', 'PRSA_data_at_LSTM14_weights.12-0.6378.keras', 'PRSA_data_vola21_LSTM21_weights.07-0.0012.keras', 'PRSA_data_vola7_MLP28_weights.45-0.0030.keras', 'PRSA_data_at_MLP7_weights.26-0.8897.keras', 'PRSA_data_vola21_MLP7_weights.01-0.0014.keras', 'PRSA_data_price_MLP14_weights.19-982.5739.keras', 'PRSA_data_vola14_LSTM_weights.16-0.0017.keras', 'PRSA_data_price_MLP21_weights.94-813.8954.keras', 'PRSA_data_vola7_LSTM21_weights.20-0.0029.keras', 'PRSA_data_at_LSTM_weights.03-1.4842.keras', 'PRSA_data_vola21_MLP28_weights.02-0.0015.keras', 'PRSA_data_price_LSTM21_weights.12-42952.9453.keras', 
'PRSA_data_price_MLP28_weights.17-1019.2816.keras', 'PRSA_data_vola14_LSTM_weights.13-0.0019.keras', 'PRSA_data_vola7_LSTM_weights.02-0.0038.keras', 'PRSA_data_vola7_LSTM28_weights.02-0.0042.keras', 'PRSA_data_at_LSTM28_weights.12-0.0705.keras', 'PRSA_data_at_LSTM14_weights.13-0.2270.keras', 'PRSA_data_price_LSTM_weights.18-67026.2891.keras', 'PRSA_data_vola28_MLP21_weights.66-0.0007.keras', 'PRSA_data_at_MLP14_weights.42-0.0373.keras', 'PRSA_data_vola7_MLP14_weights.38-0.0020.keras', 'PRSA_data_vola7_LSTM21_weights.11-0.0029.keras', 'PRSA_data_vola21_LSTM21_weights.12-0.0010.keras', 'PRSA_data_at_LSTM_weights.14-0.5158.keras', 'PRSA_data_vola14_LSTM21_weights.01-0.0019.keras', 'PRSA_data_vola28_LSTM21_weights.11-0.0009.keras', 'PRSA_data_vola7_LSTM28_weights.05-0.0038.keras', 'PRSA_data_vola14_MLP14_weights.10-0.0005.keras', 'PRSA_data_at_MLP21_weights.03-2.1540.keras', 'PRSA_data_at_LSTM_weights.16-0.5377.keras', 'PRSA_data_vola21_LSTM7_weights.13-0.0013.keras', 'PRSA_data_vola21_MLP28_weights.44-0.0008.keras', 'PRSA_data_at_MLP7_weights.16-1.9941.keras', 'PRSA_data_vola28_LSTM28_weights.01-0.0010.keras', 'PRSA_data_vola28_LSTM14_weights.19-0.0006.keras', 'PRSA_data_price_LSTM21_weights.14-42933.4180.keras', 'PRSA_data_vola14_LSTM28_weights.04-0.0019.keras', 'PRSA_data_price_MLP21_weights.82-828.7976.keras', 'PRSA_data_vola28_MLP21_weights.01-0.0030.keras', 'PRSA_data_price_LSTM14_weights.10-51355.2070.keras', 'PRSA_data_vola21_MLP21_weights.18-0.0010.keras', 'PRSA_data_vola28_MLP21_weights.73-0.0007.keras', 'PRSA_data_vola28_MLP7_weights.20-0.0011.keras', 'PRSA_data_price_LSTM21_weights.13-42943.1719.keras', 'PRSA_data_vola28_MLP28_weights.36-0.0007.keras', 'PRSA_data_vola7_MLP14_weights.04-0.0025.keras', 'PRSA_data_at_MLP14_weights.02-0.2715.keras', 'PRSA_data_price_LSTM14_weights.05-51404.8945.keras', 'PRSA_data_price_LSTM28_weights.19-42725.4297.keras', 'PRSA_data_price_MLP28_weights.01-1561.6656.keras', 'PRSA_data_vola21_MLP7_weights.14-0.0011.keras', 
'PRSA_data_vola7_MLP7_weights.23-0.0033.keras', 'PRSA_data_vola14_MLP28_weights.01-0.0026.keras', 'PRSA_data_at_MLP14_weights.29-0.0323.keras', 'PRSA_data_vola7_MLP21_weights.01-0.0033.keras', 'PRSA_data_vola21_LSTM_weights.19-0.0014.keras', 'PRSA_data_price_MLP7_weights.03-5953.8076.keras', 'PRSA_data_price_MLP21_weights.42-948.6628.keras', 'PRSA_data_vola21_LSTM14_weights.07-0.0008.keras', 'PRSA_data_vola14_LSTM28_weights.05-0.0018.keras', 'PRSA_data_vola7_LSTM28_weights.13-0.0037.keras', 'PRSA_data_vola21_MLP21_weights.01-0.0016.keras', 'PRSA_data_vola14_LSTM21_weights.08-0.0011.keras', 'PRSA_data_price_LSTM21_weights.06-43011.9570.keras', 'PRSA_data_vola7_MLP14_weights.05-0.0019.keras', 'PRSA_data_vola7_MLP28_weights.16-0.0033.keras', 'PRSA_data_vola21_LSTM28_weights.16-0.0008.keras', 'PRSA_data_vola28_MLP14_weights.01-0.0031.keras', 'PRSA_data_vola14_MLP28_weights.11-0.0015.keras', 'PRSA_data_price_LSTM14_weights.03-51425.0000.keras', 'PRSA_data_vola7_LSTM28_weights.04-0.0039.keras', 'PRSA_data_vola28_LSTM7_weights.13-0.0011.keras', 'PRSA_data_price_LSTM14_weights.01-51445.4883.keras', 'PRSA_data_vola28_MLP28_weights.22-0.0006.keras', 'PRSA_data_price_LSTM_weights.09-67115.3359.keras', 'PRSA_data_vola28_LSTM7_weights.14-0.0011.keras', 'PRSA_data_vola7_LSTM14_weights.04-0.0050.keras', 'PRSA_data_vola21_MLP28_weights.17-0.0010.keras', 'PRSA_data_vola28_LSTM21_weights.09-0.0010.keras', 'PRSA_data_vola21_MLP14_weights.13-0.0010.keras', 'PRSA_data_vola14_MLP14_weights.01-0.0013.keras', 'PRSA_data_vola21_MLP28_weights.50-0.0009.keras', 'PRSA_data_at_LSTM_weights.19-0.2184.keras', 'PRSA_data_vola14_LSTM_weights.02-0.0030.keras', 'PRSA_data_vola7_MLP28_weights.39-0.0030.keras', 'PRSA_data_price_LSTM21_weights.04-43031.9414.keras', 'PRSA_data_price_LSTM_weights.15-67056.2109.keras', 'PRSA_data_at_MLP7_weights.20-1.6814.keras', 'PRSA_data_vola28_LSTM7_weights.18-0.0010.keras', 'PRSA_data_at_MLP7_weights.95-0.2130.keras', 
'PRSA_data_vola14_LSTM28_weights.03-0.0021.keras', 'PRSA_data_at_LSTM_weights.01-7.5653.keras', 'PRSA_data_at_MLP7_weights.05-7.2110.keras', 'PRSA_data_vola21_LSTM28_weights.20-0.0008.keras', 'PRSA_data_at_LSTM_weights.07-0.8569.keras', 'PRSA_data_vola14_LSTM28_weights.12-0.0014.keras', 'PRSA_data_price_LSTM21_weights.16-42913.7969.keras', 'PRSA_data_at_MLP28_weights.04-1.3772.keras', 'PRSA_data_price_LSTM_weights.08-67125.3047.keras', 'PRSA_data_price_LSTM14_weights.07-51384.8633.keras', 'PRSA_data_price_LSTM_weights.04-67165.6016.keras', 'PRSA_data_vola14_LSTM28_weights.15-0.0014.keras', 'PRSA_data_at_MLP14_weights.01-0.4195.keras', 'PRSA_data_price_LSTM28_weights.09-42822.7578.keras', 'PRSA_data_price_LSTM28_weights.03-42882.1953.keras', 'PRSA_data_at_LSTM14_weights.05-0.8408.keras', 'PRSA_data_vola7_MLP28_weights.08-0.0030.keras', 'PRSA_data_price_MLP14_weights.07-3004.8474.keras', 'PRSA_data_price_LSTM21_weights.15-42923.6367.keras', 'PRSA_data_vola7_MLP21_weights.81-0.0025.keras', 'PRSA_data_price_MLP21_weights.86-819.7106.keras', 'PRSA_data_vola28_MLP7_weights.10-0.0010.keras', 'PRSA_data_vola21_MLP28_weights.05-0.0012.keras', 'PRSA_data_vola21_MLP14_weights.20-0.0007.keras', 'PRSA_data_vola14_MLP28_weights.42-0.0011.keras', 'PRSA_data_price_LSTM_weights.12-67085.7578.keras', 'PRSA_data_vola28_MLP21_weights.13-0.0008.keras', 'PRSA_data_vola14_MLP28_weights.08-0.0016.keras', 'PRSA_data_at_LSTM28_weights.05-0.5407.keras', 'PRSA_data_at_MLP7_weights.86-0.0883.keras', 'PRSA_data_vola28_LSTM14_weights.01-0.0020.keras', 'PRSA_data_vola21_MLP21_weights.71-0.0010.keras', 'PRSA_data_at_MLP28_weights.09-0.1537.keras', 'PRSA_data_vola7_MLP28_weights.48-0.0030.keras', 'PRSA_data_at_MLP28_weights.12-0.0354.keras', 'PRSA_data_at_LSTM14_weights.04-0.9732.keras', 'PRSA_data_at_LSTM14_weights.15-0.2154.keras', 'PRSA_data_vola14_MLP28_weights.21-0.0013.keras', 'PRSA_data_vola21_MLP14_weights.25-0.0009.keras', 'PRSA_data_vola14_LSTM14_weights.01-0.0027.keras', 
'PRSA_data_at_LSTM21_weights.20-0.0855.keras', 'PRSA_data_vola21_LSTM28_weights.01-0.0039.keras', 'PRSA_data_price_MLP14_weights.02-8059.2510.keras', 'PRSA_data_vola21_MLP7_weights.13-0.0013.keras', 'PRSA_data_vola28_MLP7_weights.04-0.0016.keras', 'PRSA_data_vola21_LSTM21_weights.02-0.0043.keras', 'PRSA_data_price_MLP14_weights.78-683.9230.keras', 'PRSA_data_price_MLP21_weights.19-1002.5925.keras', 'PRSA_data_vola28_MLP21_weights.92-0.0007.keras', 'PRSA_data_price_LSTM28_weights.14-42774.2500.keras', 'PRSA_data_vola21_MLP21_weights.06-0.0010.keras', 'PRSA_data_vola14_LSTM14_weights.03-0.0010.keras', 'PRSA_data_price_LSTM_weights.16-67046.2422.keras', 'PRSA_data_vola7_LSTM28_weights.07-0.0037.keras', 'PRSA_data_vola28_LSTM7_weights.01-0.0017.keras', 'PRSA_data_vola28_LSTM14_weights.02-0.0008.keras', 'PRSA_data_vola7_MLP14_weights.05-0.0022.keras', 'PRSA_data_at_MLP21_weights.03-0.2039.keras', 'PRSA_data_vola14_LSTM28_weights.02-0.0023.keras', 'PRSA_data_at_MLP7_weights.03-13.2212.keras', 'PRSA_data_vola7_LSTM14_weights.04-0.0037.keras', 'PRSA_data_vola14_MLP21_weights.69-0.0010.keras', 'PRSA_data_vola21_LSTM7_weights.18-0.0013.keras', 'PRSA_data_vola7_MLP21_weights.49-0.0025.keras', 'PRSA_data_vola28_MLP28_weights.31-0.0006.keras', 'PRSA_data_price_MLP21_weights.77-841.5739.keras', 'PRSA_data_vola28_MLP14_weights.35-0.0006.keras', 'PRSA_data_vola7_MLP28_weights.26-0.0031.keras', 'PRSA_data_vola7_LSTM14_weights.01-0.0065.keras', 'PRSA_data_price_LSTM14_weights.02-51435.1133.keras', 'PRSA_data_price_LSTM28_weights.20-42714.7695.keras', 'PRSA_data_vola28_LSTM28_weights.16-0.0006.keras', 'PRSA_data_vola14_LSTM28_weights.06-0.0016.keras', 'PRSA_data_vola21_MLP21_weights.13-0.0010.keras', 'PRSA_data_vola28_MLP14_weights.32-0.0006.keras', 'PRSA_data_vola7_MLP28_weights.01-0.0043.keras', 'PRSA_data_vola21_MLP28_weights.08-0.0011.keras', 'PRSA_data_vola21_LSTM28_weights.10-0.0011.keras', 'PRSA_data_vola21_LSTM7_weights.14-0.0012.keras', 
'PRSA_data_vola7_LSTM28_weights.01-0.0049.keras', 'PRSA_data_price_LSTM28_weights.20-42715.6250.keras', 'PRSA_data_vola21_LSTM7_weights.13-0.0012.keras', 'PRSA_data_price_LSTM_weights.05-67155.5078.keras', 'PRSA_data_price_MLP14_weights.14-1073.6493.keras', 'PRSA_data_vola14_LSTM_weights.03-0.0029.keras', 'PRSA_data_vola14_MLP7_weights.07-0.0021.keras', 'PRSA_data_at_MLP28_weights.03-0.0637.keras', 'PRSA_data_vola21_MLP28_weights.03-0.0012.keras', 'PRSA_data_at_MLP28_weights.05-0.1823.keras', 'PRSA_data_at_MLP21_weights.01-0.9762.keras', 'PRSA_data_vola7_LSTM28_weights.15-0.0035.keras', 'PRSA_data_price_LSTM21_weights.08-42992.0703.keras', 'PRSA_data_vola21_MLP21_weights.22-0.0010.keras', 'PRSA_data_at_LSTM28_weights.01-6.0769.keras', 'PRSA_data_vola28_LSTM28_weights.20-0.0006.keras', 'PRSA_data_at_MLP21_weights.01-4.3241.keras', 'PRSA_data_at_LSTM_weights.06-0.8671.keras', 'PRSA_data_price_MLP7_weights.37-1654.4709.keras', 'PRSA_data_at_LSTM_weights.20-0.4626.keras', 'PRSA_data_vola21_MLP28_weights.27-0.0009.keras', 'PRSA_data_at_LSTM_weights.19-0.4729.keras', 'PRSA_data_at_MLP7_weights.24-1.4718.keras', 'PRSA_data_at_MLP7_weights.59-0.4636.keras', 'PRSA_data_vola14_LSTM14_weights.06-0.0005.keras', 'PRSA_data_at_MLP7_weights.14-2.2322.keras', 'PRSA_data_vola7_LSTM14_weights.02-0.0196.keras', 'PRSA_data_at_LSTM14_weights.19-0.3356.keras', 'PRSA_data_price_LSTM28_weights.18-42735.2305.keras', 'PRSA_data_at_LSTM21_weights.04-0.8083.keras', 'PRSA_data_price_LSTM14_weights.12-51335.6523.keras', 'PRSA_data_vola21_LSTM_weights.06-0.0017.keras', 'PRSA_data_price_LSTM28_weights.05-42862.2383.keras', 'PRSA_data_price_MLP7_weights.10-1248.9592.keras', 'PRSA_data_vola21_MLP28_weights.35-0.0009.keras', 'PRSA_data_vola28_LSTM21_weights.03-0.0013.keras', 'PRSA_data_at_LSTM21_weights.03-1.0323.keras', 'PRSA_data_price_MLP14_weights.04-5028.7607.keras', 'PRSA_data_price_LSTM14_weights.08-51374.8945.keras', 'PRSA_data_vola28_MLP14_weights.48-0.0007.keras']
# Run the best loaded model over the three data splits, if loading succeeded.
if best_model21 is not None:
    # Predict each split, then drop the trailing singleton dimension so the
    # arrays are plain 1-D vectors of prices.
    train_preds_price_MLP21 = np.squeeze(best_model21.predict(X_train_price21))
    val_preds_price_MLP21 = np.squeeze(best_model21.predict(X_val_price21))
    test_preds_price_MLP21 = np.squeeze(best_model21.predict(X_test_price21))

    # Report the predictions for each split.
    print('Predicciones de entrenamiento', train_preds_price_MLP21)
    print("Predicciones de validación:", val_preds_price_MLP21)
    print("Predicciones de prueba:", test_preds_price_MLP21)
else:
    print("No se puede realizar predicciones porque el mejor modelo no se ha cargado.")
1/154 [..............................] - ETA: 2s
154/154 [==============================] - 0s 164us/step
1/1 [==============================] - ETA: 0s
1/1 [==============================] - 0s 5ms/step
1/1 [==============================] - ETA: 0s
1/1 [==============================] - 0s 5ms/step
Predicciones de entrenamiento [1.5229378e+00 1.5229378e+00 1.5229378e+00 ... 4.2924215e+04 4.2262543e+04
4.1566129e+04]
Predicciones de validación: [41704.48 40374.72 40553.37 40431.52 40874.7 41290.66 41634.316
42386.434 42852.22 43141.98 43072.465 42889.33 43407.887 43727.613
43108.15 43252.324 43299.793 43985.02 44776.555 46131.29 47446.04 ]
Predicciones de prueba: [48048.016 49294.535 49790.19 50791.574 51835.637 52354.637 52377.98
52371.832 52418.26 52376.26 52182.14 52175.594 51477.582 51876.45
51790.66 53357.36 55565.69 59479.4 61246.395 61904.973 62264.824]
A continuación se indexan los parámetros sobre la función data_plot() para un horizonte de 21 días (\(\tau = 21\)).
# Split the raw Price series into train/validation/test segments for plotting,
# using the 21-day horizon (data_plot is a helper defined earlier in the notebook).
data_train_plot_price21, data_val_plot_price21, data_test_plot_price21 = data_plot(df_1_st['Price'], 21)
Datos de entrenamiento:
4998 0.1
4997 0.1
4996 0.1
4995 0.1
4994 0.1
...
46 44339.8
45 45293.3
44 47127.5
43 47758.2
42 48277.3
Name: Price, Length: 4957, dtype: float64
Datos de validación:
41 49941.3
40 49716.0
39 51782.4
38 51901.3
37 52134.2
36 51646.0
35 52117.5
34 51783.6
33 52263.5
32 51858.2
31 51320.4
30 50740.5
29 51571.6
28 51722.7
27 54495.1
26 57056.2
25 62467.6
24 61169.3
23 62397.7
22 61994.5
21 63135.8
Name: Price, dtype: float64
Datos de prueba:
20 68270.1
19 63792.6
18 66080.4
17 66855.3
16 68172.0
15 68366.5
14 68964.8
13 72099.1
12 71470.2
11 73066.3
10 71387.5
9 69463.7
8 65314.2
7 68391.2
6 67594.1
5 62050.0
4 67854.0
3 65503.8
2 63785.5
1 64037.8
0 67211.9
Name: Price, dtype: float64
# Plot the last 100 training points plus the validation/test targets against
# the MLP's validation and test predictions for the 21-day horizon.
plot_model(data_train_plot_price21[-100:], data_val_plot_price21, data_test_plot_price21, val_preds_price_MLP21, test_preds_price_MLP21, "Predicciones usando Perceptrón Multicapa (MLP) para un horizonte de 21 días")
A continuación realizamos un análisis sobre los residuales y el rendimiento de nuestro modelo con relación a nuestro conjunto de validación (rendimiento) y de prueba (rendimiento y residuales).
Conjunto de datos de Entrenamiento
A continuación se realiza el análisis de los residuales para el conjunto de entrenamiento.
# Residual diagnostics on the training fit; returns the Ljung-Box
# (autocorrelation) and Jarque-Bera (normality) p-values.
ljung_box_pval_MLP_train21, jarque_bera_pval_MLP_train21 = diagnostic_plots(y_train_price21, train_preds_price_MLP21)
Ljung-Box LB Statistic: 1009.438215
Ljung-Box p-value: 0.000000
Se rechaza H0: hay autocorrelación en los residuales.
Jarque-Bera p-value: 0.000000
Se rechaza H0: los residuales no siguen una distribución normal.
# Compute goodness-of-fit metrics for the training split.
metrica_price_MLP_train21 = metricas(y_train_price21, train_preds_price_MLP21)
# Relabel the single result row so the table identifies the model and horizon.
metrica_price_MLP_train21.index = metrica_price_MLP_train21.index.map(
    {0: 'MLP Entrenamiento Price τ = 21'}
)
# Attach the residual-diagnostic p-values as additional columns.
for col_name, p_value in (
    ('Ljung-Box p-value', ljung_box_pval_MLP_train21),
    ('Jarque-Bera p-value', jarque_bera_pval_MLP_train21),
):
    metrica_price_MLP_train21[col_name] = pd.Series([p_value], index=metrica_price_MLP_train21.index)
metrica_price_MLP_train21
| SSE | MAPE | MAD | MSD | R2 | Ljung-Box p-value | Jarque-Bera p-value | |
|---|---|---|---|---|---|---|---|
| MLP Entrenamiento Price τ = 21 | 2.8020e+09 | 40.45% | 311.4 | 570097.46 | 99.75% | 1.5950e-221 | 0.0 |
Conjunto de datos de Prueba:
# Residual diagnostics on the test split (autocorrelation and normality tests).
ljung_box_pvalMLP21, jarque_bera_pvalMLP21 = evaluate_residuals(data_test_plot_price21, test_preds_price_MLP21)
Se rechaza H0: hay autocorrelación en los residuales.
No se rechaza H0: los residuales siguen una distribución normal.
# Compute goodness-of-fit metrics for the test split.
metrica_MLP21_test = metricas(y_test_price21, test_preds_price_MLP21)
# Relabel the single result row so the table identifies the model and horizon.
metrica_MLP21_test.index = metrica_MLP21_test.index.map({0: 'MLP Prueba Price τ = 21'})
# Attach the residual-diagnostic p-values as additional columns.
for col_name, p_value in (
    ('Ljung-Box p-value', ljung_box_pvalMLP21),
    ('Jarque-Bera p-value', jarque_bera_pvalMLP21),
):
    metrica_MLP21_test[col_name] = pd.Series([p_value], index=metrica_MLP21_test.index)
metrica_MLP21_test
| SSE | MAPE | MAD | MSD | R2 | Ljung-Box p-value | Jarque-Bera p-value | |
|---|---|---|---|---|---|---|---|
| MLP Prueba Price τ = 21 | 8.6674e+07 | 2.34% | 1314.14 | 4.1273e+06 | 80.51% | 0.0002 | 0.1965 |
Curva Runs vs Error/Score :
# Plot the validation-loss curve across training runs for the MLP (τ = 21).
plot_best_model_validation_loss(history_price_MPL21)
Boxplot Errores Conjunto de Entrenamiento, Validación y Prueba :
# Boxplots of the prediction errors for the train, validation and test splits.
errores_plots(y_train_price21, train_preds_price_MLP21, y_val_price21, val_preds_price_MLP21, y_test_price21, test_preds_price_MLP21)
De acuerdo a los resultados obtenidos del gráfico de Run vs Error/Score y al compararlo con el gráfico boxplot de los residuales se observa lo siguiente:
Conjunto de Entrenamiento: La media de los residuales se encuentra muy cercana al error/score correspondiente al val_loss de la época del mejor modelo.
Conjunto de Validación: La media de los residuales se encuentra alejada del error/score correspondiente al val_loss de la época del mejor modelo.
Conjunto de Prueba: La media de los residuales se encuentra alejada del error/score correspondiente al val_loss de la época del mejor modelo. De igual forma se observa una alta variabilidad en los residuales.
Lo anterior es coherente con las métricas obtenidas para el modelo implementado. Sin embargo, el modelo muestra un buen desempeño con la única limitante de que los residuales presentan correlación; por tanto, no es confiable.
Horizonte de 28 días (\(\tau=28\))#
A continuación, se lleva a cabo la búsqueda de los hiperparámetros que optimizan nuestro modelo utilizando Keras y GridSearch.
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # Suppress all TensorFlow info and warning logs.
def create_mlp_model(activation='tanh', learning_rate=0.001):
    """Build and compile an MLP regressor for the 28-day horizon.

    Architecture: 28-dim input -> Dense(32) -> Dense(16) -> Dense(16)
    -> Dropout(0.2) -> Dense(1, linear), trained with MAE loss and
    a legacy Adam optimizer at the given learning rate.
    """
    inputs = Input(shape=(28,), dtype='float32')       # input layer: 28 lagged features
    hidden = Dense(32, activation=activation)(inputs)
    hidden = Dense(16, activation=activation)(hidden)
    hidden = Dense(16, activation=activation)(hidden)
    hidden = Dropout(0.2)(hidden)                      # dropout for regularization
    outputs = Dense(1, activation='linear')(hidden)    # single regression output
    mlp = Model(inputs=inputs, outputs=outputs)
    mlp.compile(loss='mean_absolute_error',
                optimizer=tf.keras.optimizers.legacy.Adam(learning_rate=learning_rate))
    return mlp
# ---- Grid-search configuration ----
model = KerasRegressor(build_fn=create_mlp_model, epochs=20, batch_size=16, verbose=0)

# Single-candidate grid; wider ranges considered (kept for reference):
# activation in ['relu', 'tanh', 'sigmoid'], epochs in [20, 50, 100, 200, 300],
# learning_rate in [0.001, 0.01, 0.1, 0.2, 0.3].
param_grid = {
    'activation': ['relu'],
    'epochs': [100],
    'learning_rate': [0.001],
}
grid = GridSearchCV(estimator=model, param_grid=param_grid, n_jobs=-1,
                    cv=KFold(n_splits=5, shuffle=True), verbose=2)

# ---- Run the search on the 28-day training set ----
grid_result = grid.fit(X_train_price28, y_train_price28)

# ---- Report the best hyperparameters found ----
print(f"Mejor función de activación: {grid_result.best_params_['activation']}")
print(f"Mejor número de epocas: {grid_result.best_params_['epochs']}")
print(f"Mejor Longitud de paso μ (Learning Rate): {grid_result.best_params_['learning_rate']}")
print(f"Mejor Puntuación: {grid_result.best_score_}")
Fitting 5 folds for each of 1 candidates, totalling 5 fits
Mejor función de activación: relu
Mejor número de epocas: 100
Mejor Longitud de paso μ (Learning Rate): 0.001
Mejor Puntuación: -789.1053344726563
A continuación, configuramos la red MLP empleando la API funcional de Keras. Con este método, una capa puede ser designada como la entrada para la siguiente capa en el momento de su definición.
En este contexto, Input es una función utilizada para establecer una capa de entrada en un modelo de red neuronal. La propiedad shape=(28,) define la estructura de los datos de entrada, lo que indica que estos tendrán 28 dimensiones. Por otro lado, dtype='float32' especifica que los elementos de esta capa serán números de punto flotante de 32 bits.
A continuación se define la función build_models_mlp para la generación de modelos de acuerdo con los parámetros indexados a través de la lista.
La indexación de parámetros de la función build_models_mlp se realiza con base en lo indicado en el numeral 3 del parcial práctico.
# Hyperparameter combinations for the tau = 28 horizon: every pairing of
# layer size and dropout rate, with ReLU activation, per item 3 of the exam.
input_shape28 = 28                      # number of lagged input features
neurons_list = [10, 100, 1000, 10000]   # candidate layer widths
dropout_rates = [0.2, 0.4, 0.6, 0.8]    # candidate dropout probabilities
models_MLP28 = build_models_mlp(input_shape28, neurons_list, dropout_rates, 'relu')
Modelo con 10 neuronas y dropout 0.2:
Model: "model_52"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_53 (InputLayer) [(None, 28)] 0
dense_208 (Dense) (None, 32) 928
dense_209 (Dense) (None, 16) 528
dense_210 (Dense) (None, 16) 272
dropout_52 (Dropout) (None, 16) 0
dense_211 (Dense) (None, 1) 17
=================================================================
Total params: 1,745
Trainable params: 1,745
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10 neuronas y dropout 0.4:
Model: "model_53"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_54 (InputLayer) [(None, 28)] 0
dense_212 (Dense) (None, 32) 928
dense_213 (Dense) (None, 16) 528
dense_214 (Dense) (None, 16) 272
dropout_53 (Dropout) (None, 16) 0
dense_215 (Dense) (None, 1) 17
=================================================================
Total params: 1,745
Trainable params: 1,745
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10 neuronas y dropout 0.6:
Model: "model_54"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_55 (InputLayer) [(None, 28)] 0
dense_216 (Dense) (None, 32) 928
dense_217 (Dense) (None, 16) 528
dense_218 (Dense) (None, 16) 272
dropout_54 (Dropout) (None, 16) 0
dense_219 (Dense) (None, 1) 17
=================================================================
Total params: 1,745
Trainable params: 1,745
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10 neuronas y dropout 0.8:
Model: "model_55"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_56 (InputLayer) [(None, 28)] 0
dense_220 (Dense) (None, 32) 928
dense_221 (Dense) (None, 16) 528
dense_222 (Dense) (None, 16) 272
dropout_55 (Dropout) (None, 16) 0
dense_223 (Dense) (None, 1) 17
=================================================================
Total params: 1,745
Trainable params: 1,745
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 100 neuronas y dropout 0.2:
Model: "model_56"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_57 (InputLayer) [(None, 28)] 0
dense_224 (Dense) (None, 32) 928
dense_225 (Dense) (None, 16) 528
dense_226 (Dense) (None, 16) 272
dropout_56 (Dropout) (None, 16) 0
dense_227 (Dense) (None, 1) 17
=================================================================
Total params: 1,745
Trainable params: 1,745
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 100 neuronas y dropout 0.4:
Model: "model_57"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_58 (InputLayer) [(None, 28)] 0
dense_228 (Dense) (None, 32) 928
dense_229 (Dense) (None, 16) 528
dense_230 (Dense) (None, 16) 272
dropout_57 (Dropout) (None, 16) 0
dense_231 (Dense) (None, 1) 17
=================================================================
Total params: 1,745
Trainable params: 1,745
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 100 neuronas y dropout 0.6:
Model: "model_58"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_59 (InputLayer) [(None, 28)] 0
dense_232 (Dense) (None, 32) 928
dense_233 (Dense) (None, 16) 528
dense_234 (Dense) (None, 16) 272
dropout_58 (Dropout) (None, 16) 0
dense_235 (Dense) (None, 1) 17
=================================================================
Total params: 1,745
Trainable params: 1,745
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 100 neuronas y dropout 0.8:
Model: "model_59"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_60 (InputLayer) [(None, 28)] 0
dense_236 (Dense) (None, 32) 928
dense_237 (Dense) (None, 16) 528
dense_238 (Dense) (None, 16) 272
dropout_59 (Dropout) (None, 16) 0
dense_239 (Dense) (None, 1) 17
=================================================================
Total params: 1,745
Trainable params: 1,745
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 1000 neuronas y dropout 0.2:
Model: "model_60"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_61 (InputLayer) [(None, 28)] 0
dense_240 (Dense) (None, 32) 928
dense_241 (Dense) (None, 16) 528
dense_242 (Dense) (None, 16) 272
dropout_60 (Dropout) (None, 16) 0
dense_243 (Dense) (None, 1) 17
=================================================================
Total params: 1,745
Trainable params: 1,745
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 1000 neuronas y dropout 0.4:
Model: "model_61"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_62 (InputLayer) [(None, 28)] 0
dense_244 (Dense) (None, 32) 928
dense_245 (Dense) (None, 16) 528
dense_246 (Dense) (None, 16) 272
dropout_61 (Dropout) (None, 16) 0
dense_247 (Dense) (None, 1) 17
=================================================================
Total params: 1,745
Trainable params: 1,745
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 1000 neuronas y dropout 0.6:
Model: "model_62"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_63 (InputLayer) [(None, 28)] 0
dense_248 (Dense) (None, 32) 928
dense_249 (Dense) (None, 16) 528
dense_250 (Dense) (None, 16) 272
dropout_62 (Dropout) (None, 16) 0
dense_251 (Dense) (None, 1) 17
=================================================================
Total params: 1,745
Trainable params: 1,745
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 1000 neuronas y dropout 0.8:
Model: "model_63"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_64 (InputLayer) [(None, 28)] 0
dense_252 (Dense) (None, 32) 928
dense_253 (Dense) (None, 16) 528
dense_254 (Dense) (None, 16) 272
dropout_63 (Dropout) (None, 16) 0
dense_255 (Dense) (None, 1) 17
=================================================================
Total params: 1,745
Trainable params: 1,745
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10000 neuronas y dropout 0.2:
Model: "model_64"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_65 (InputLayer) [(None, 28)] 0
dense_256 (Dense) (None, 32) 928
dense_257 (Dense) (None, 16) 528
dense_258 (Dense) (None, 16) 272
dropout_64 (Dropout) (None, 16) 0
dense_259 (Dense) (None, 1) 17
=================================================================
Total params: 1,745
Trainable params: 1,745
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10000 neuronas y dropout 0.4:
Model: "model_65"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_66 (InputLayer) [(None, 28)] 0
dense_260 (Dense) (None, 32) 928
dense_261 (Dense) (None, 16) 528
dense_262 (Dense) (None, 16) 272
dropout_65 (Dropout) (None, 16) 0
dense_263 (Dense) (None, 1) 17
=================================================================
Total params: 1,745
Trainable params: 1,745
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10000 neuronas y dropout 0.6:
Model: "model_66"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_67 (InputLayer) [(None, 28)] 0
dense_264 (Dense) (None, 32) 928
dense_265 (Dense) (None, 16) 528
dense_266 (Dense) (None, 16) 272
dropout_66 (Dropout) (None, 16) 0
dense_267 (Dense) (None, 1) 17
=================================================================
Total params: 1,745
Trainable params: 1,745
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10000 neuronas y dropout 0.8:
Model: "model_67"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_68 (InputLayer) [(None, 28)] 0
dense_268 (Dense) (None, 32) 928
dense_269 (Dense) (None, 16) 528
dense_270 (Dense) (None, 16) 272
dropout_67 (Dropout) (None, 16) 0
dense_271 (Dense) (None, 1) 17
=================================================================
Total params: 1,745
Trainable params: 1,745
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Para entrenar el modelo, se utiliza la función fit() en el objeto del modelo, proporcionándole como argumentos X_train y y_train. El proceso de entrenamiento se lleva a cabo a lo largo de un número específico de épocas. Además, el parámetro batch_size especifica cuántas muestras del conjunto de entrenamiento se usarán en cada iteración de retropropagación (backpropagation).
El conjunto de datos de validación se utiliza para evaluar el rendimiento del modelo al finalizar cada época. Se emplea un objeto ModelCheckpoint que monitorea la función de pérdida en el conjunto de validación y almacena el modelo correspondiente a la época en la que esta función alcanza su valor más bajo.
Se define val_loss como el valor de la función de coste para los datos de validación cruzada y loss como el valor de la función de coste para los datos de entrenamiento. period=1 indica que el modelo se evaluará y potencialmente se guardará después de cada época.
from tensorflow.keras.callbacks import ModelCheckpoint

# Checkpoint path template: epoch number and validation loss are embedded
# in the filename so the best epoch can be recovered from the name alone.
save_weights_at = os.path.join('keras_models', 'PRSA_data_price_MLP28_weights.{epoch:02d}-{val_loss:.4f}.keras')
# Persist the full model only when val_loss improves, checked once per epoch.
save_best28 = ModelCheckpoint(
    save_weights_at,
    monitor='val_loss',
    mode='min',
    verbose=2,
    save_best_only=True,
    save_weights_only=False,
    save_freq='epoch',
)
A continuación se itera sobre cada uno de los modelos del objeto models_MLP28.
import os
from joblib import dump, load

history_price_MPL28 = []
# Train (or reload) each candidate architecture in models_MLP28. Each model's
# training history is cached to disk so re-running the notebook skips training.
for i, model in enumerate(models_MLP28):
    filename = f'history_price_MPL28_model_{i}.joblib'
    if os.path.exists(filename):
        # A cached history already exists: load it instead of retraining.
        model_history = load(filename)
        print(f"El archivo '{filename}' ya existe. Se ha cargado el historial del entrenamiento.")
    else:
        # Train with the epoch count selected by the grid search; the best
        # checkpoint (lowest val_loss) is saved via the save_best28 callback.
        model_history = model.fit(x=X_train_price28, y=y_train_price28, batch_size=16, epochs=100,
                                  verbose=2, callbacks=[save_best28], validation_data=(X_val_price28, y_val_price28),
                                  shuffle=True)
        dump(model_history.history, filename)
        print(f"El entrenamiento del modelo {i + 1} se ha completado y el historial ha sido guardado en '{filename}'.")
    # Cached loads yield a plain dict; fresh fits yield a Keras History object.
    history_price_MPL28.append(model_history if isinstance(model_history, dict) else model_history.history)
El archivo 'history_price_MPL28_model_0.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_price_MPL28_model_1.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_price_MPL28_model_2.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_price_MPL28_model_3.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_price_MPL28_model_4.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_price_MPL28_model_5.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_price_MPL28_model_6.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_price_MPL28_model_7.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_price_MPL28_model_8.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_price_MPL28_model_9.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_price_MPL28_model_10.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_price_MPL28_model_11.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_price_MPL28_model_12.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_price_MPL28_model_13.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_price_MPL28_model_14.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_price_MPL28_model_15.joblib' ya existe. Se ha cargado el historial del entrenamiento.
En este caso, el parámetro verbose=2 indica que se mostrará una línea por cada época durante el entrenamiento. Los valores posibles son: 0 para no mostrar información, 1 para visualizar una barra de progreso y 2 para ver un registro por época.
Además, las muestras se mezclan de manera aleatoria antes de cada época, gracias a la opción shuffle=True.
Una vez completado el entrenamiento, se realizan predicciones sobre el Precio del Bitcoin utilizando los mejores modelos guardados. Asimismo, se evalúa la calidad del ajuste mediante el uso de métricas de medición como SSE, MAPE, MAD, MSD y R2.
import os
import re
from tensorflow.keras.models import load_model
import numpy as np

# Locate the saved checkpoint with the lowest validation loss among the
# MLP tau = 28 price-model files written by the ModelCheckpoint callback.
model_dir = 'keras_models'
files = os.listdir(model_dir)
# Filename template: PRSA_data_price_MLP28_weights.<epoch>-<val_loss>.keras
pattern = r"PRSA_data_price_MLP28_weights\.(\d+)-([\d\.]+)\.keras"

best_val_loss = float('inf')
best_model_file = None
best_model28 = None

for file in files:
    match = re.match(pattern, file)
    if not match:
        continue  # skip checkpoints belonging to other models/horizons
    val_loss = float(match.group(2))  # validation loss encoded in the filename
    # Keep the checkpoint with the smallest val_loss seen so far.
    if val_loss < best_val_loss:
        best_val_loss = val_loss
        best_model_file = file

# Load the winning checkpoint, if any matching file was found.
if best_model_file:
    best_model_path = os.path.join(model_dir, best_model_file)
    print(f"Cargando el mejor modelo: {best_model_file} con val_loss: {best_val_loss}")
    best_model28 = load_model(best_model_path)
    if best_model28 is None:
        print("Error: No se pudo cargar el mejor modelo.")
else:
    print("No se encontraron archivos de modelos que coincidan con el patrón.")
Cargando el mejor modelo: PRSA_data_price_MLP28_weights.17-1019.2816.keras con val_loss: 1019.2816
A continuación estimamos las predicciones del modelo MLP para la época en donde la función de pérdida alcanza su valor más bajo.
# Show every file found in the checkpoint directory (sanity check of `files`).
print("Archivos en el directorio 'keras_models':", files)
Archivos en el directorio 'keras_models': ['PRSA_data_vola14_MLP7_weights.04-0.0018.keras', 'PRSA_data_price_MLP21_weights.01-3579.4128.keras', 'PRSA_data_price_LSTM21_weights.01-43062.3047.keras', 'PRSA_data_price_LSTM28_weights.02-42892.2305.keras', 'PRSA_data_price_LSTM_weights.19-67016.3047.keras', 'PRSA_data_at_LSTM28_weights.19-0.0651.keras', 'PRSA_data_vola14_MLP21_weights.20-0.0011.keras', 'PRSA_data_price_LSTM14_weights.06-51394.8750.keras', 'PRSA_data_vola21_LSTM7_weights.04-0.0012.keras', 'PRSA_data_vola7_MLP28_weights.07-0.0033.keras', 'PRSA_data_price_LSTM21_weights.02-43052.0469.keras', 'PRSA_data_at_MLP7_weights.80-0.0780.keras', 'PRSA_data_vola21_LSTM28_weights.02-0.0022.keras', 'PRSA_data_vola21_LSTM14_weights.04-0.0011.keras', 'PRSA_data_vola7_LSTM21_weights.18-0.0029.keras', 'PRSA_data_vola21_LSTM7_weights.08-0.0013.keras', 'PRSA_data_vola28_MLP14_weights.04-0.0013.keras', 'PRSA_data_price_MLP14_weights.52-626.2623.keras', 'PRSA_data_vola7_MLP7_weights.15-0.0041.keras', 'PRSA_data_price_LSTM14_weights.13-51325.8516.keras', 'PRSA_data_vola14_MLP7_weights.02-0.0023.keras', 'PRSA_data_price_LSTM21_weights.11-42962.6562.keras', 'PRSA_data_vola7_LSTM28_weights.10-0.0037.keras', 'PRSA_data_at_LSTM_weights.04-1.2071.keras', 'PRSA_data_price_MLP7_weights.50-1342.5273.keras', 'PRSA_data_price_MLP14_weights.03-5108.7837.keras', 'PRSA_data_vola28_MLP7_weights.14-0.0012.keras', 'PRSA_data_at_MLP7_weights.11-2.4883.keras', 'PRSA_data_price_LSTM28_weights.08-42832.5430.keras', 'PRSA_data_price_MLP28_weights.01-9242.6553.keras', 'PRSA_data_vola21_LSTM28_weights.04-0.0019.keras', 'PRSA_data_price_MLP21_weights.16-1013.2251.keras', 'PRSA_data_price_MLP28_weights.13-1042.5178.keras', 'PRSA_data_vola28_MLP28_weights.34-0.0006.keras', 'PRSA_data_at_MLP7_weights.01-19.6635.keras', 'PRSA_data_at_MLP7_weights.19-1.8379.keras', 'PRSA_data_at_MLP7_weights.12-2.3991.keras', 'PRSA_data_price_MLP7_weights.15-1346.3275.keras', 
'PRSA_data_vola14_MLP28_weights.02-0.0024.keras', 'PRSA_data_at_LSTM21_weights.05-0.7513.keras', 'PRSA_data_vola14_MLP28_weights.07-0.0017.keras', 'PRSA_data_at_MLP28_weights.05-0.0930.keras', 'PRSA_data_at_LSTM28_weights.02-2.0782.keras', 'PRSA_data_price_LSTM14_weights.20-51256.7266.keras', 'PRSA_data_vola21_LSTM21_weights.05-0.0013.keras', 'PRSA_data_price_MLP14_weights.12-1304.7009.keras', 'PRSA_data_vola7_LSTM14_weights.09-0.0044.keras', 'PRSA_data_vola28_MLP28_weights.50-0.0006.keras', 'PRSA_data_vola7_MLP14_weights.15-0.0020.keras', 'PRSA_data_price_MLP14_weights.08-1386.5365.keras', 'PRSA_data_price_LSTM21_weights.09-42982.2383.keras', 'PRSA_data_price_MLP28_weights.11-7366.8994.keras', 'PRSA_data_vola14_MLP7_weights.18-0.0015.keras', 'PRSA_data_price_MLP7_weights.14-1734.7238.keras', 'PRSA_data_at_LSTM28_weights.03-1.3148.keras', 'PRSA_data_price_LSTM14_weights.15-51306.2070.keras', 'PRSA_data_vola21_MLP28_weights.25-0.0010.keras', 'PRSA_data_at_LSTM28_weights.10-0.2820.keras', 'PRSA_data_at_LSTM14_weights.02-2.2175.keras', 'PRSA_data_vola28_LSTM7_weights.08-0.0014.keras', 'PRSA_data_price_MLP28_weights.10-1107.4047.keras', 'PRSA_data_vola28_MLP7_weights.01-0.0068.keras', 'PRSA_data_vola21_LSTM21_weights.03-0.0015.keras', 'PRSA_data_price_MLP7_weights.01-9253.3047.keras', 'PRSA_data_vola21_MLP14_weights.31-0.0007.keras', 'PRSA_data_price_LSTM28_weights.12-42793.6172.keras', 'PRSA_data_price_LSTM14_weights.09-51365.0000.keras', 'PRSA_data_vola28_MLP28_weights.42-0.0006.keras', 'PRSA_data_price_MLP7_weights.05-4792.5054.keras', 'PRSA_data_vola21_LSTM_weights.01-0.0024.keras', 'PRSA_data_price_LSTM28_weights.17-42745.0352.keras', 'PRSA_data_vola28_MLP28_weights.45-0.0006.keras', 'PRSA_data_vola28_MLP14_weights.08-0.0010.keras', 'PRSA_data_at_LSTM_weights.11-0.6498.keras', 'PRSA_data_vola28_MLP28_weights.39-0.0007.keras', 'PRSA_data_at_MLP21_weights.11-0.0326.keras', 'PRSA_data_price_LSTM_weights.20-67006.4141.keras', 
'PRSA_data_vola28_MLP7_weights.06-0.0014.keras', 'PRSA_data_at_LSTM14_weights.06-0.8145.keras', 'PRSA_data_at_MLP28_weights.01-2.1902.keras', 'PRSA_data_price_MLP14_weights.46-814.1992.keras', '.DS_Store', 'PRSA_data_at_LSTM21_weights.01-5.9452.keras', 'PRSA_data_vola28_LSTM28_weights.12-0.0006.keras', 'PRSA_data_price_LSTM14_weights.18-51276.5625.keras', 'PRSA_data_vola21_LSTM14_weights.01-0.0078.keras', 'PRSA_data_vola14_MLP21_weights.04-0.0015.keras', 'PRSA_data_vola21_LSTM7_weights.02-0.0012.keras', 'PRSA_data_vola21_LSTM14_weights.02-0.0011.keras', 'PRSA_data_at_MLP7_weights.09-2.9793.keras', 'PRSA_data_vola21_MLP14_weights.06-0.0007.keras', 'PRSA_data_vola14_MLP21_weights.78-0.0010.keras', 'PRSA_data_price_MLP14_weights.64-711.9107.keras', 'PRSA_data_vola14_MLP28_weights.31-0.0011.keras', 'PRSA_data_vola14_LSTM_weights.08-0.0020.keras', 'PRSA_data_vola14_LSTM21_weights.02-0.0018.keras', 'PRSA_data_at_LSTM21_weights.02-2.0083.keras', 'PRSA_data_vola14_LSTM28_weights.17-0.0014.keras', 'PRSA_data_vola14_MLP14_weights.29-0.0005.keras', 'PRSA_data_price_LSTM_weights.03-67175.7734.keras', 'PRSA_data_vola14_LSTM_weights.01-0.0068.keras', 'PRSA_data_price_LSTM28_weights.11-42803.3086.keras', 'PRSA_data_price_MLP28_weights.15-7107.3306.keras', 'PRSA_data_vola14_MLP21_weights.49-0.0010.keras', 'PRSA_data_vola28_MLP7_weights.07-0.0010.keras', 'PRSA_data_vola21_LSTM7_weights.02-0.0010.keras', 'PRSA_data_at_LSTM14_weights.15-0.4082.keras', 'PRSA_data_vola28_MLP7_weights.19-0.0011.keras', 'PRSA_data_at_LSTM21_weights.19-0.3670.keras', 'PRSA_data_vola14_MLP14_weights.06-0.0006.keras', 'PRSA_data_vola14_LSTM21_weights.06-0.0012.keras', 'PRSA_data_price_LSTM_weights.17-67036.2891.keras', 'PRSA_data_vola28_MLP7_weights.03-0.0018.keras', 'PRSA_data_vola7_MLP28_weights.02-0.0039.keras', 'PRSA_data_vola7_LSTM28_weights.16-0.0035.keras', 'PRSA_data_vola21_MLP7_weights.20-0.0011.keras', 'PRSA_data_vola21_LSTM7_weights.01-0.0018.keras', 
'PRSA_data_vola21_MLP21_weights.33-0.0009.keras', 'PRSA_data_price_MLP21_weights.64-870.7020.keras', 'PRSA_data_vola21_MLP14_weights.22-0.0007.keras', 'PRSA_data_vola7_MLP14_weights.14-0.0020.keras', 'PRSA_data_price_LSTM21_weights.17-42903.9648.keras', 'PRSA_data_vola21_MLP14_weights.01-0.0021.keras', 'PRSA_data_vola21_LSTM7_weights.16-0.0011.keras', 'PRSA_data_at_LSTM21_weights.17-0.5070.keras', 'PRSA_data_at_LSTM14_weights.11-0.6656.keras', 'PRSA_data_vola28_MLP7_weights.02-0.0019.keras', 'PRSA_data_vola14_MLP28_weights.05-0.0018.keras', 'PRSA_data_vola14_LSTM14_weights.08-0.0005.keras', 'PRSA_data_at_MLP21_weights.02-0.8171.keras', 'PRSA_data_price_MLP28_weights.02-8134.2202.keras', 'PRSA_data_price_LSTM28_weights.06-42852.3320.keras', 'PRSA_data_vola7_MLP14_weights.02-0.0036.keras', 'PRSA_data_vola14_LSTM28_weights.01-0.0026.keras', 'PRSA_data_at_MLP7_weights.04-9.8837.keras', 'PRSA_data_price_MLP28_weights.16-1274.4368.keras', 'PRSA_data_price_LSTM21_weights.07-43002.0078.keras', 'PRSA_data_vola7_MLP28_weights.23-0.0032.keras', 'PRSA_data_price_LSTM_weights.01-67196.3359.keras', 'PRSA_data_at_MLP28_weights.02-1.9156.keras', 'PRSA_data_vola14_MLP21_weights.02-0.0016.keras', 'PRSA_data_vola28_MLP14_weights.14-0.0007.keras', 'PRSA_data_price_LSTM28_weights.07-42842.4375.keras', 'PRSA_data_price_MLP14_weights.37-825.1805.keras', 'PRSA_data_vola7_MLP14_weights.07-0.0021.keras', 'PRSA_data_price_LSTM14_weights.04-51414.8945.keras', 'PRSA_data_price_MLP7_weights.42-1446.1339.keras', 'PRSA_data_price_MLP21_weights.57-932.6497.keras', 'PRSA_data_price_LSTM14_weights.19-51266.6133.keras', 'PRSA_data_vola21_MLP14_weights.34-0.0008.keras', 'PRSA_data_price_MLP14_weights.01-10485.8262.keras', 'PRSA_data_vola21_LSTM7_weights.08-0.0012.keras', 'PRSA_data_at_LSTM_weights.02-2.4966.keras', 'PRSA_data_at_LSTM28_weights.08-0.3981.keras', 'PRSA_data_vola21_LSTM28_weights.15-0.0009.keras', 'PRSA_data_price_LSTM_weights.20-67006.3047.keras', 
'PRSA_data_price_LSTM_weights.14-67066.1016.keras', 'PRSA_data_price_LSTM21_weights.19-42884.2422.keras', 'PRSA_data_vola28_LSTM21_weights.18-0.0008.keras', 'PRSA_data_vola21_LSTM7_weights.04-0.0013.keras', 'PRSA_data_price_LSTM28_weights.20-42714.5508.keras', 'PRSA_data_at_LSTM14_weights.13-0.5609.keras', 'PRSA_data_vola28_LSTM28_weights.04-0.0009.keras', 'PRSA_data_vola14_MLP21_weights.01-0.0118.keras', 'PRSA_data_vola7_MLP21_weights.02-0.0032.keras', 'PRSA_data_at_MLP7_weights.74-0.2838.keras', 'PRSA_data_price_MLP28_weights.17-1217.0461.keras', 'PRSA_data_at_MLP21_weights.16-1.1973.keras', 'PRSA_data_vola7_MLP7_weights.02-0.0036.keras', 'PRSA_data_vola21_LSTM7_weights.02-0.0015.keras', 'PRSA_data_vola14_LSTM14_weights.04-0.0006.keras', 'PRSA_data_vola28_LSTM28_weights.18-0.0006.keras', 'PRSA_data_price_MLP21_weights.74-805.9489.keras', 'PRSA_data_vola21_MLP14_weights.02-0.0011.keras', 'PRSA_data_vola28_MLP21_weights.02-0.0012.keras', 'PRSA_data_vola21_LSTM_weights.11-0.0014.keras', 'PRSA_data_vola7_LSTM14_weights.14-0.0036.keras', 'PRSA_data_at_MLP28_weights.03-1.7712.keras', 'PRSA_data_price_LSTM21_weights.20-42874.3555.keras', 'PRSA_data_vola28_LSTM7_weights.20-0.0010.keras', 'PRSA_data_vola28_MLP21_weights.04-0.0009.keras', 'PRSA_data_at_MLP7_weights.08-3.6716.keras', 'PRSA_data_at_MLP21_weights.04-0.0734.keras', 'PRSA_data_vola21_LSTM28_weights.03-0.0021.keras', 'PRSA_data_vola21_MLP14_weights.35-0.0008.keras', 'PRSA_data_vola28_LSTM7_weights.11-0.0012.keras', 'PRSA_data_price_LSTM28_weights.04-42872.1953.keras', 'PRSA_data_at_LSTM_weights.08-0.4803.keras', 'PRSA_data_vola28_MLP14_weights.05-0.0010.keras', 'PRSA_data_price_LSTM14_weights.14-51316.0625.keras', 'PRSA_data_vola7_MLP14_weights.01-0.0101.keras', 'PRSA_data_vola14_LSTM28_weights.17-0.0015.keras', 'PRSA_data_vola7_LSTM_weights.01-0.0040.keras', 'PRSA_data_vola14_MLP21_weights.09-0.0011.keras', 'PRSA_data_price_LSTM21_weights.05-43021.9570.keras', 'PRSA_data_vola28_MLP28_weights.03-0.0007.keras', 
'PRSA_data_vola7_LSTM14_weights.10-0.0042.keras', 'PRSA_data_price_LSTM_weights.07-67135.3359.keras', 'PRSA_data_vola7_MLP14_weights.18-0.0020.keras', 'PRSA_data_price_LSTM21_weights.18-42894.1055.keras', 'PRSA_data_vola7_MLP7_weights.01-0.0042.keras', 'PRSA_data_vola14_LSTM_weights.05-0.0024.keras', 'PRSA_data_vola21_MLP14_weights.27-0.0008.keras', 'PRSA_data_vola28_MLP28_weights.02-0.0018.keras', 'PRSA_data_vola21_LSTM7_weights.03-0.0017.keras', 'PRSA_data_price_LSTM_weights.10-67105.4297.keras', 'PRSA_data_vola28_MLP7_weights.19-0.0010.keras', 'PRSA_data_vola7_MLP21_weights.03-0.0030.keras', 'PRSA_data_at_MLP7_weights.06-5.3724.keras', 'PRSA_data_at_LSTM_weights.05-1.1226.keras', 'PRSA_data_vola21_LSTM7_weights.01-0.0026.keras', 'PRSA_data_vola21_LSTM28_weights.05-0.0018.keras', 'PRSA_data_vola14_MLP21_weights.05-0.0012.keras', 'PRSA_data_vola21_LSTM28_weights.14-0.0010.keras', 'PRSA_data_vola21_MLP14_weights.08-0.0010.keras', 'PRSA_data_vola21_MLP28_weights.47-0.0008.keras', 'PRSA_data_price_LSTM14_weights.16-51296.3633.keras', 'PRSA_data_at_MLP7_weights.02-16.3421.keras', 'PRSA_data_vola7_LSTM21_weights.15-0.0030.keras', 'PRSA_data_vola21_MLP21_weights.09-0.0009.keras', 'PRSA_data_at_LSTM14_weights.01-6.2168.keras', 'PRSA_data_vola28_LSTM21_weights.01-0.0016.keras', 'PRSA_data_vola21_LSTM21_weights.16-0.0009.keras', 'PRSA_data_vola21_LSTM28_weights.07-0.0016.keras', 'PRSA_data_vola21_MLP28_weights.19-0.0010.keras', 'PRSA_data_vola21_LSTM28_weights.14-0.0009.keras', 'PRSA_data_vola14_MLP7_weights.09-0.0018.keras', 'PRSA_data_vola21_MLP21_weights.02-0.0011.keras', 'PRSA_data_vola28_LSTM14_weights.03-0.0007.keras', 'PRSA_data_vola21_LSTM_weights.09-0.0015.keras', 'PRSA_data_vola21_LSTM7_weights.09-0.0012.keras', 'PRSA_data_vola28_MLP21_weights.65-0.0007.keras', 'PRSA_data_vola21_MLP28_weights.15-0.0011.keras', 'PRSA_data_at_LSTM28_weights.04-0.7851.keras', 'PRSA_data_at_MLP7_weights.42-0.8694.keras', 'PRSA_data_vola21_LSTM28_weights.18-0.0008.keras', 
'PRSA_data_vola21_MLP14_weights.04-0.0008.keras', 'PRSA_data_vola7_LSTM14_weights.02-0.0040.keras', 'PRSA_data_vola21_LSTM14_weights.12-0.0007.keras', 'PRSA_data_vola7_LSTM21_weights.15-0.0029.keras', 'PRSA_data_vola21_MLP28_weights.01-0.0026.keras', 'PRSA_data_price_LSTM_weights.13-67075.9453.keras', 'PRSA_data_vola14_MLP7_weights.01-0.0041.keras', 'PRSA_data_price_MLP21_weights.02-1476.1034.keras', 'PRSA_data_vola14_LSTM28_weights.19-0.0014.keras', 'PRSA_data_price_MLP14_weights.96-663.0834.keras', 'PRSA_data_vola7_MLP21_weights.94-0.0024.keras', 'PRSA_data_price_LSTM21_weights.03-43041.9570.keras', 'PRSA_data_price_MLP7_weights.12-3798.9531.keras', 'PRSA_data_price_LSTM28_weights.10-42813.0352.keras', 'PRSA_data_vola14_MLP28_weights.20-0.0014.keras', 'PRSA_data_vola7_MLP21_weights.04-0.0027.keras', 'PRSA_data_vola7_LSTM28_weights.18-0.0035.keras', 'PRSA_data_at_MLP7_weights.10-2.7632.keras', 'PRSA_data_at_LSTM21_weights.18-0.3561.keras', 'PRSA_data_price_LSTM_weights.06-67145.4141.keras', 'PRSA_data_price_LSTM28_weights.13-42783.9805.keras', 'PRSA_data_vola21_MLP7_weights.13-0.0012.keras', 'PRSA_data_price_LSTM28_weights.01-42902.4570.keras', 'PRSA_data_price_LSTM_weights.20-67005.9609.keras', 'PRSA_data_vola7_LSTM21_weights.04-0.0034.keras', 'PRSA_data_at_MLP14_weights.03-0.0461.keras', 'PRSA_data_at_LSTM21_weights.07-0.5113.keras', 'PRSA_data_vola28_MLP28_weights.01-0.0042.keras', 'PRSA_data_vola7_LSTM14_weights.01-0.0455.keras', 'PRSA_data_price_MLP28_weights.17-1182.9105.keras', 'PRSA_data_vola7_MLP14_weights.05-0.0018.keras', 'PRSA_data_vola28_MLP14_weights.38-0.0006.keras', 'PRSA_data_vola28_MLP7_weights.10-0.0013.keras', 'PRSA_data_vola7_LSTM21_weights.01-0.0038.keras', 'PRSA_data_price_LSTM14_weights.17-51286.4570.keras', 'PRSA_data_price_LSTM28_weights.16-42754.8008.keras', 'PRSA_data_price_LSTM14_weights.11-51345.4023.keras', 'PRSA_data_price_MLP14_weights.22-638.4409.keras', 'PRSA_data_vola7_MLP28_weights.32-0.0029.keras', 
'PRSA_data_price_MLP7_weights.48-1648.1864.keras', 'PRSA_data_vola28_LSTM28_weights.10-0.0007.keras', 'PRSA_data_vola21_LSTM_weights.03-0.0021.keras', 'PRSA_data_vola21_LSTM21_weights.13-0.0010.keras', 'PRSA_data_at_MLP7_weights.07-4.2627.keras', 'PRSA_data_vola7_LSTM21_weights.17-0.0029.keras', 'PRSA_data_vola21_LSTM28_weights.08-0.0011.keras', 'PRSA_data_price_MLP21_weights.05-1072.5081.keras', 'PRSA_data_at_LSTM21_weights.06-0.6219.keras', 'PRSA_data_vola7_LSTM21_weights.05-0.0030.keras', 'PRSA_data_price_MLP14_weights.29-856.8320.keras', 'PRSA_data_vola14_LSTM21_weights.11-0.0011.keras', 'PRSA_data_price_LSTM21_weights.10-42972.4219.keras', 'PRSA_data_at_LSTM21_weights.19-0.2744.keras', 'PRSA_data_price_LSTM_weights.02-67185.9453.keras', 'PRSA_data_vola28_MLP14_weights.21-0.0007.keras', 'PRSA_data_vola21_LSTM28_weights.16-0.0009.keras', 'PRSA_data_vola28_LSTM21_weights.10-0.0009.keras', 'PRSA_data_price_LSTM28_weights.15-42764.5898.keras', 'PRSA_data_at_LSTM14_weights.07-0.7059.keras', 'PRSA_data_vola21_LSTM21_weights.01-0.0048.keras', 'PRSA_data_at_LSTM14_weights.03-1.3422.keras', 'PRSA_data_vola14_LSTM14_weights.02-0.0015.keras', 'PRSA_data_vola14_MLP14_weights.02-0.0007.keras', 'PRSA_data_vola7_LSTM21_weights.02-0.0034.keras', 'PRSA_data_vola7_MLP21_weights.21-0.0025.keras', 'PRSA_data_at_MLP7_weights.18-1.8396.keras', 'PRSA_data_price_LSTM_weights.11-67095.6172.keras', 'PRSA_data_at_LSTM14_weights.12-0.6378.keras', 'PRSA_data_vola21_LSTM21_weights.07-0.0012.keras', 'PRSA_data_vola7_MLP28_weights.45-0.0030.keras', 'PRSA_data_at_MLP7_weights.26-0.8897.keras', 'PRSA_data_vola21_MLP7_weights.01-0.0014.keras', 'PRSA_data_price_MLP14_weights.19-982.5739.keras', 'PRSA_data_vola14_LSTM_weights.16-0.0017.keras', 'PRSA_data_price_MLP21_weights.94-813.8954.keras', 'PRSA_data_vola7_LSTM21_weights.20-0.0029.keras', 'PRSA_data_at_LSTM_weights.03-1.4842.keras', 'PRSA_data_vola21_MLP28_weights.02-0.0015.keras', 'PRSA_data_price_LSTM21_weights.12-42952.9453.keras', 
'PRSA_data_price_MLP28_weights.17-1019.2816.keras', 'PRSA_data_vola14_LSTM_weights.13-0.0019.keras', 'PRSA_data_vola7_LSTM_weights.02-0.0038.keras', 'PRSA_data_vola7_LSTM28_weights.02-0.0042.keras', 'PRSA_data_at_LSTM28_weights.12-0.0705.keras', 'PRSA_data_at_LSTM14_weights.13-0.2270.keras', 'PRSA_data_price_LSTM_weights.18-67026.2891.keras', 'PRSA_data_vola28_MLP21_weights.66-0.0007.keras', 'PRSA_data_at_MLP14_weights.42-0.0373.keras', 'PRSA_data_vola7_MLP14_weights.38-0.0020.keras', 'PRSA_data_vola7_LSTM21_weights.11-0.0029.keras', 'PRSA_data_vola21_LSTM21_weights.12-0.0010.keras', 'PRSA_data_at_LSTM_weights.14-0.5158.keras', 'PRSA_data_vola14_LSTM21_weights.01-0.0019.keras', 'PRSA_data_vola28_LSTM21_weights.11-0.0009.keras', 'PRSA_data_vola7_LSTM28_weights.05-0.0038.keras', 'PRSA_data_vola14_MLP14_weights.10-0.0005.keras', 'PRSA_data_at_MLP21_weights.03-2.1540.keras', 'PRSA_data_at_LSTM_weights.16-0.5377.keras', 'PRSA_data_vola21_LSTM7_weights.13-0.0013.keras', 'PRSA_data_vola21_MLP28_weights.44-0.0008.keras', 'PRSA_data_at_MLP7_weights.16-1.9941.keras', 'PRSA_data_vola28_LSTM28_weights.01-0.0010.keras', 'PRSA_data_vola28_LSTM14_weights.19-0.0006.keras', 'PRSA_data_price_LSTM21_weights.14-42933.4180.keras', 'PRSA_data_vola14_LSTM28_weights.04-0.0019.keras', 'PRSA_data_price_MLP21_weights.82-828.7976.keras', 'PRSA_data_vola28_MLP21_weights.01-0.0030.keras', 'PRSA_data_price_LSTM14_weights.10-51355.2070.keras', 'PRSA_data_vola21_MLP21_weights.18-0.0010.keras', 'PRSA_data_vola28_MLP21_weights.73-0.0007.keras', 'PRSA_data_vola28_MLP7_weights.20-0.0011.keras', 'PRSA_data_price_LSTM21_weights.13-42943.1719.keras', 'PRSA_data_vola28_MLP28_weights.36-0.0007.keras', 'PRSA_data_vola7_MLP14_weights.04-0.0025.keras', 'PRSA_data_at_MLP14_weights.02-0.2715.keras', 'PRSA_data_price_LSTM14_weights.05-51404.8945.keras', 'PRSA_data_price_LSTM28_weights.19-42725.4297.keras', 'PRSA_data_price_MLP28_weights.01-1561.6656.keras', 'PRSA_data_vola21_MLP7_weights.14-0.0011.keras', 
'PRSA_data_vola7_MLP7_weights.23-0.0033.keras', 'PRSA_data_vola14_MLP28_weights.01-0.0026.keras', 'PRSA_data_at_MLP14_weights.29-0.0323.keras', 'PRSA_data_vola7_MLP21_weights.01-0.0033.keras', 'PRSA_data_vola21_LSTM_weights.19-0.0014.keras', 'PRSA_data_price_MLP7_weights.03-5953.8076.keras', 'PRSA_data_price_MLP21_weights.42-948.6628.keras', 'PRSA_data_vola21_LSTM14_weights.07-0.0008.keras', 'PRSA_data_vola14_LSTM28_weights.05-0.0018.keras', 'PRSA_data_vola7_LSTM28_weights.13-0.0037.keras', 'PRSA_data_vola21_MLP21_weights.01-0.0016.keras', 'PRSA_data_vola14_LSTM21_weights.08-0.0011.keras', 'PRSA_data_price_LSTM21_weights.06-43011.9570.keras', 'PRSA_data_vola7_MLP14_weights.05-0.0019.keras', 'PRSA_data_vola7_MLP28_weights.16-0.0033.keras', 'PRSA_data_vola21_LSTM28_weights.16-0.0008.keras', 'PRSA_data_vola28_MLP14_weights.01-0.0031.keras', 'PRSA_data_vola14_MLP28_weights.11-0.0015.keras', 'PRSA_data_price_LSTM14_weights.03-51425.0000.keras', 'PRSA_data_vola7_LSTM28_weights.04-0.0039.keras', 'PRSA_data_vola28_LSTM7_weights.13-0.0011.keras', 'PRSA_data_price_LSTM14_weights.01-51445.4883.keras', 'PRSA_data_vola28_MLP28_weights.22-0.0006.keras', 'PRSA_data_price_LSTM_weights.09-67115.3359.keras', 'PRSA_data_vola28_LSTM7_weights.14-0.0011.keras', 'PRSA_data_vola7_LSTM14_weights.04-0.0050.keras', 'PRSA_data_vola21_MLP28_weights.17-0.0010.keras', 'PRSA_data_vola28_LSTM21_weights.09-0.0010.keras', 'PRSA_data_vola21_MLP14_weights.13-0.0010.keras', 'PRSA_data_vola14_MLP14_weights.01-0.0013.keras', 'PRSA_data_vola21_MLP28_weights.50-0.0009.keras', 'PRSA_data_at_LSTM_weights.19-0.2184.keras', 'PRSA_data_vola14_LSTM_weights.02-0.0030.keras', 'PRSA_data_vola7_MLP28_weights.39-0.0030.keras', 'PRSA_data_price_LSTM21_weights.04-43031.9414.keras', 'PRSA_data_price_LSTM_weights.15-67056.2109.keras', 'PRSA_data_at_MLP7_weights.20-1.6814.keras', 'PRSA_data_vola28_LSTM7_weights.18-0.0010.keras', 'PRSA_data_at_MLP7_weights.95-0.2130.keras', 
'PRSA_data_vola14_LSTM28_weights.03-0.0021.keras', 'PRSA_data_at_LSTM_weights.01-7.5653.keras', 'PRSA_data_at_MLP7_weights.05-7.2110.keras', 'PRSA_data_vola21_LSTM28_weights.20-0.0008.keras', 'PRSA_data_at_LSTM_weights.07-0.8569.keras', 'PRSA_data_vola14_LSTM28_weights.12-0.0014.keras', 'PRSA_data_price_LSTM21_weights.16-42913.7969.keras', 'PRSA_data_at_MLP28_weights.04-1.3772.keras', 'PRSA_data_price_LSTM_weights.08-67125.3047.keras', 'PRSA_data_price_LSTM14_weights.07-51384.8633.keras', 'PRSA_data_price_LSTM_weights.04-67165.6016.keras', 'PRSA_data_vola14_LSTM28_weights.15-0.0014.keras', 'PRSA_data_at_MLP14_weights.01-0.4195.keras', 'PRSA_data_price_LSTM28_weights.09-42822.7578.keras', 'PRSA_data_price_LSTM28_weights.03-42882.1953.keras', 'PRSA_data_at_LSTM14_weights.05-0.8408.keras', 'PRSA_data_vola7_MLP28_weights.08-0.0030.keras', 'PRSA_data_price_MLP14_weights.07-3004.8474.keras', 'PRSA_data_price_LSTM21_weights.15-42923.6367.keras', 'PRSA_data_vola7_MLP21_weights.81-0.0025.keras', 'PRSA_data_price_MLP21_weights.86-819.7106.keras', 'PRSA_data_vola28_MLP7_weights.10-0.0010.keras', 'PRSA_data_vola21_MLP28_weights.05-0.0012.keras', 'PRSA_data_vola21_MLP14_weights.20-0.0007.keras', 'PRSA_data_vola14_MLP28_weights.42-0.0011.keras', 'PRSA_data_price_LSTM_weights.12-67085.7578.keras', 'PRSA_data_vola28_MLP21_weights.13-0.0008.keras', 'PRSA_data_vola14_MLP28_weights.08-0.0016.keras', 'PRSA_data_at_LSTM28_weights.05-0.5407.keras', 'PRSA_data_at_MLP7_weights.86-0.0883.keras', 'PRSA_data_vola28_LSTM14_weights.01-0.0020.keras', 'PRSA_data_vola21_MLP21_weights.71-0.0010.keras', 'PRSA_data_at_MLP28_weights.09-0.1537.keras', 'PRSA_data_vola7_MLP28_weights.48-0.0030.keras', 'PRSA_data_at_MLP28_weights.12-0.0354.keras', 'PRSA_data_at_LSTM14_weights.04-0.9732.keras', 'PRSA_data_at_LSTM14_weights.15-0.2154.keras', 'PRSA_data_vola14_MLP28_weights.21-0.0013.keras', 'PRSA_data_vola21_MLP14_weights.25-0.0009.keras', 'PRSA_data_vola14_LSTM14_weights.01-0.0027.keras', 
'PRSA_data_at_LSTM21_weights.20-0.0855.keras', 'PRSA_data_vola21_LSTM28_weights.01-0.0039.keras', 'PRSA_data_price_MLP14_weights.02-8059.2510.keras', 'PRSA_data_vola21_MLP7_weights.13-0.0013.keras', 'PRSA_data_vola28_MLP7_weights.04-0.0016.keras', 'PRSA_data_vola21_LSTM21_weights.02-0.0043.keras', 'PRSA_data_price_MLP14_weights.78-683.9230.keras', 'PRSA_data_price_MLP21_weights.19-1002.5925.keras', 'PRSA_data_vola28_MLP21_weights.92-0.0007.keras', 'PRSA_data_price_LSTM28_weights.14-42774.2500.keras', 'PRSA_data_vola21_MLP21_weights.06-0.0010.keras', 'PRSA_data_vola14_LSTM14_weights.03-0.0010.keras', 'PRSA_data_price_LSTM_weights.16-67046.2422.keras', 'PRSA_data_vola7_LSTM28_weights.07-0.0037.keras', 'PRSA_data_vola28_LSTM7_weights.01-0.0017.keras', 'PRSA_data_vola28_LSTM14_weights.02-0.0008.keras', 'PRSA_data_vola7_MLP14_weights.05-0.0022.keras', 'PRSA_data_at_MLP21_weights.03-0.2039.keras', 'PRSA_data_vola14_LSTM28_weights.02-0.0023.keras', 'PRSA_data_at_MLP7_weights.03-13.2212.keras', 'PRSA_data_vola7_LSTM14_weights.04-0.0037.keras', 'PRSA_data_vola14_MLP21_weights.69-0.0010.keras', 'PRSA_data_vola21_LSTM7_weights.18-0.0013.keras', 'PRSA_data_vola7_MLP21_weights.49-0.0025.keras', 'PRSA_data_vola28_MLP28_weights.31-0.0006.keras', 'PRSA_data_price_MLP21_weights.77-841.5739.keras', 'PRSA_data_vola28_MLP14_weights.35-0.0006.keras', 'PRSA_data_vola7_MLP28_weights.26-0.0031.keras', 'PRSA_data_vola7_LSTM14_weights.01-0.0065.keras', 'PRSA_data_price_LSTM14_weights.02-51435.1133.keras', 'PRSA_data_price_LSTM28_weights.20-42714.7695.keras', 'PRSA_data_vola28_LSTM28_weights.16-0.0006.keras', 'PRSA_data_vola14_LSTM28_weights.06-0.0016.keras', 'PRSA_data_vola21_MLP21_weights.13-0.0010.keras', 'PRSA_data_vola28_MLP14_weights.32-0.0006.keras', 'PRSA_data_vola7_MLP28_weights.01-0.0043.keras', 'PRSA_data_vola21_MLP28_weights.08-0.0011.keras', 'PRSA_data_vola21_LSTM28_weights.10-0.0011.keras', 'PRSA_data_vola21_LSTM7_weights.14-0.0012.keras', 
'PRSA_data_vola7_LSTM28_weights.01-0.0049.keras', 'PRSA_data_price_LSTM28_weights.20-42715.6250.keras', 'PRSA_data_vola21_LSTM7_weights.13-0.0012.keras', 'PRSA_data_price_LSTM_weights.05-67155.5078.keras', 'PRSA_data_price_MLP14_weights.14-1073.6493.keras', 'PRSA_data_vola14_LSTM_weights.03-0.0029.keras', 'PRSA_data_vola14_MLP7_weights.07-0.0021.keras', 'PRSA_data_at_MLP28_weights.03-0.0637.keras', 'PRSA_data_vola21_MLP28_weights.03-0.0012.keras', 'PRSA_data_at_MLP28_weights.05-0.1823.keras', 'PRSA_data_at_MLP21_weights.01-0.9762.keras', 'PRSA_data_vola7_LSTM28_weights.15-0.0035.keras', 'PRSA_data_price_LSTM21_weights.08-42992.0703.keras', 'PRSA_data_vola21_MLP21_weights.22-0.0010.keras', 'PRSA_data_at_LSTM28_weights.01-6.0769.keras', 'PRSA_data_vola28_LSTM28_weights.20-0.0006.keras', 'PRSA_data_at_MLP21_weights.01-4.3241.keras', 'PRSA_data_at_LSTM_weights.06-0.8671.keras', 'PRSA_data_price_MLP7_weights.37-1654.4709.keras', 'PRSA_data_at_LSTM_weights.20-0.4626.keras', 'PRSA_data_vola21_MLP28_weights.27-0.0009.keras', 'PRSA_data_at_LSTM_weights.19-0.4729.keras', 'PRSA_data_at_MLP7_weights.24-1.4718.keras', 'PRSA_data_at_MLP7_weights.59-0.4636.keras', 'PRSA_data_vola14_LSTM14_weights.06-0.0005.keras', 'PRSA_data_at_MLP7_weights.14-2.2322.keras', 'PRSA_data_vola7_LSTM14_weights.02-0.0196.keras', 'PRSA_data_at_LSTM14_weights.19-0.3356.keras', 'PRSA_data_price_LSTM28_weights.18-42735.2305.keras', 'PRSA_data_at_LSTM21_weights.04-0.8083.keras', 'PRSA_data_price_LSTM14_weights.12-51335.6523.keras', 'PRSA_data_vola21_LSTM_weights.06-0.0017.keras', 'PRSA_data_price_LSTM28_weights.05-42862.2383.keras', 'PRSA_data_price_MLP7_weights.10-1248.9592.keras', 'PRSA_data_vola21_MLP28_weights.35-0.0009.keras', 'PRSA_data_vola28_LSTM21_weights.03-0.0013.keras', 'PRSA_data_at_LSTM21_weights.03-1.0323.keras', 'PRSA_data_price_MLP14_weights.04-5028.7607.keras', 'PRSA_data_price_LSTM14_weights.08-51374.8945.keras', 'PRSA_data_vola28_MLP14_weights.48-0.0007.keras']
# Generate predictions with the best checkpoint on the three data splits.
if best_model28 is not None:
    # Predict and drop the trailing singleton dimension in a single step.
    train_preds_price_MLP28 = np.squeeze(best_model28.predict(X_train_price28))
    val_preds_price_MLP28 = np.squeeze(best_model28.predict(X_val_price28))
    test_preds_price_MLP28 = np.squeeze(best_model28.predict(X_test_price28))

    # Echo the flattened prediction arrays.
    print("Predicciones de entrenamiento:", train_preds_price_MLP28)
    print("Predicciones de validación:", val_preds_price_MLP28)
    print("Predicciones de prueba:", test_preds_price_MLP28)
else:
    print("No se puede realizar predicciones porque el mejor modelo no se ha cargado.")
1/153 [..............................] - ETA: 2s
153/153 [==============================] - 0s 161us/step
1/1 [==============================] - ETA: 0s
1/1 [==============================] - 0s 5ms/step
1/1 [==============================] - ETA: 0s
1/1 [==============================] - 0s 5ms/step
Predicciones de entrenamiento: [3.0964053e-01 3.0964053e-01 3.0964053e-01 ... 4.3267039e+04 4.2537688e+04
4.2393188e+04]
Predicciones de validación: [42475.094 43321.85 44164.906 44205.18 43593.75 43815.117 43759.504
43884.375 45899.92 46405.082 45573.586 45852.36 45133.43 43342.723
42550.6 44133.246 43827.867 42810.008 42008.35 41694.617 42095.5
41638.273 41023.477 40232.953 39788.406 40017.9 40913.023 41272.273]
Predicciones de prueba: [41749.516 41569.277 42501.797 42760.73 41647.336 42642.383 44055.54
43399.75 43337.312 43218.637 42880.406 44375.57 45780.65 47243.31
47328.832 48313.65 49592.38 50884. 51286.92 52384.156 51499.492
51900.83 52274.92 52306.23 52539.137 52480.887 51751.715 52079.617]
A continuación se indexan los parámetros sobre la función data_plot() para un horizonte de 28 días (\(\tau = 28\)).
# Split the price series into train / validation / test segments for plotting (28-day horizon).
data_train_plot_price28, data_val_plot_price28 , data_test_plot_price28 = data_plot(df_1_st['Price'], 28)
Datos de entrenamiento:
4998 0.1
4997 0.1
4996 0.1
4995 0.1
4994 0.1
...
60 40086.0
59 39935.7
58 41811.3
57 42120.9
56 42030.7
Name: Price, Length: 4943, dtype: float6428
Datos de validación:
55 43299.8
54 42946.2
53 42580.5
52 43081.4
51 43194.7
50 43005.7
49 42581.4
48 42697.2
47 43087.7
46 44339.8
45 45293.3
44 47127.5
43 47758.2
42 48277.3
41 49941.3
40 49716.0
39 51782.4
38 51901.3
37 52134.2
36 51646.0
35 52117.5
34 51783.6
33 52263.5
32 51858.2
31 51320.4
30 50740.5
29 51571.6
28 51722.7
Name: Price, dtype: float6428
Datos de prueba:
27 54495.1
26 57056.2
25 62467.6
24 61169.3
23 62397.7
22 61994.5
21 63135.8
20 68270.1
19 63792.6
18 66080.4
17 66855.3
16 68172.0
15 68366.5
14 68964.8
13 72099.1
12 71470.2
11 73066.3
10 71387.5
9 69463.7
8 65314.2
7 68391.2
6 67594.1
5 62050.0
4 67854.0
3 65503.8
2 63785.5
1 64037.8
0 67211.9
Name: Price, dtype: float6428
# Plot the last 100 training points plus validation/test predictions (28-day horizon).
plot_model(data_train_plot_price28[-100:], data_val_plot_price28, data_test_plot_price28, val_preds_price_MLP28, test_preds_price_MLP28, "Predicciones usando Perceptrón Multicapa (MLP) para un horizonte de 28 días")
A continuación realizamos un análisis sobre los residuales y el rendimiento de nuestro modelo con relación a nuestro conjunto de validación (rendimiento) y de test (rendimiento y residuales).
Conjunto de datos de Entrenamiento
A continuación se realiza el análisis de los residuales para el conjunto de entrenamiento.
# Residual diagnostics on the training split: Ljung-Box (autocorrelation) and
# Jarque-Bera (normality) tests; both p-values are returned.
ljung_box_pval_MLP_train28, jarque_bera_pval_MLP_train28 = diagnostic_plots(y_train_price28, train_preds_price_MLP28)
Ljung-Box LB Statistic: 1124.660608
Ljung-Box p-value: 0.000000
Se rechaza H0: hay autocorrelación en los residuales.
Jarque-Bera p-value: 0.000000
Se rechaza H0: los residuales no siguen una distribución normal.
# Compute the fit metrics (SSE, MAPE, MAD, MSD, R2) for the training split.
metrica_price_MLP_train28 = metricas(y_train_price28,train_preds_price_MLP28)
# Relabel the single row so the metrics table is self-describing.
metrica_price_MLP_train28.index = metrica_price_MLP_train28.index.map({0: 'MLP Entrenamiento Price τ = 28'})
# Append the residual-diagnostic p-values computed above as extra columns.
metrica_price_MLP_train28['Ljung-Box p-value'] = pd.Series([ljung_box_pval_MLP_train28], index=metrica_price_MLP_train28.index)
metrica_price_MLP_train28['Jarque-Bera p-value'] = pd.Series([jarque_bera_pval_MLP_train28], index=metrica_price_MLP_train28.index)
# Display the resulting one-row metrics table.
metrica_price_MLP_train28
| SSE | MAPE | MAD | MSD | R2 | Ljung-Box p-value | Jarque-Bera p-value | |
|---|---|---|---|---|---|---|---|
| MLP Entrenamiento Price τ = 28 | 3.4518e+09 | 8.41% | 348.66 | 706317.16 | 99.69% | 1.4424e-246 | 0.0 |
Conjunto de datos de Prueba:
# Residual diagnostics on the test split (same Ljung-Box / Jarque-Bera tests).
ljung_box_pvalMLP28, jarque_bera_pvalMLP28 = evaluate_residuals(data_test_plot_price28, test_preds_price_MLP28)
Se rechaza H0: hay autocorrelación en los residuales.
No se rechaza H0: los residuales siguen una distribución normal.
# Compute the fit metrics for the test split.
metrica_MLP28_test = metricas(y_test_price28,test_preds_price_MLP28)
# Relabel the single row so the metrics table is self-describing.
metrica_MLP28_test.index = metrica_MLP28_test.index.map({0: 'MLP Prueba Price τ = 28'})
# Append the residual-diagnostic p-values as extra columns.
metrica_MLP28_test['Ljung-Box p-value'] = pd.Series([ljung_box_pvalMLP28], index=metrica_MLP28_test.index)
metrica_MLP28_test['Jarque-Bera p-value'] = pd.Series([jarque_bera_pvalMLP28], index=metrica_MLP28_test.index)
# Display the resulting one-row metrics table.
metrica_MLP28_test
| SSE | MAPE | MAD | MSD | R2 | Ljung-Box p-value | Jarque-Bera p-value | |
|---|---|---|---|---|---|---|---|
| MLP Prueba Price τ = 28 | 5.0452e+07 | 2.29% | 1087.84 | 1.8018e+06 | 88.21% | 5.4064e-05 | 0.3951 |
Curva Runs vs Error/Score :
# Plot the validation-loss curve (runs vs error/score) from the saved training history.
plot_best_model_validation_loss(history_price_MPL28)
Boxplot Errores Conjunto de Entrenamiento, Validación y Prueba :
# Boxplots of the residuals for the train / validation / test splits.
errores_plots(y_train_price28, train_preds_price_MLP28, y_val_price28, val_preds_price_MLP28, y_test_price28, test_preds_price_MLP28)
De acuerdo a los resultados obtenidos del gráfico de Run vs Error/Score y al compararlo con el gráfico boxplot de los residuales se observa lo siguiente:
Conjunto de Entrenamiento: La media de los residuales se encuentra muy cercana al error/score correspondiente al val_loss de la época del mejor modelo.
Conjunto de Validación: La media de los residuales se encuentra alejada del error/score correspondiente al val_loss de la época del mejor modelo.
Conjunto de Prueba: La media de los residuales se encuentra alejada del error/score correspondiente al val_loss de la época del mejor modelo. De igual forma se observa una alta variabilidad en los residuales.
Lo anterior es coherente con las métricas obtenidas para el modelo implementado. Sin embargo, el modelo muestra un buen desempeño con la única limitante de que los residuales presentan correlación; por tanto, no es confiable.
Price: Memoria a Corto y Largo Plazo (LSTM) #
Ya definimos los regresores (X) y la variable objetivo (y) para el proceso de entrenamiento y validación en la sección correspondiente al modelo Perceptrones Multicapa a través de la función create_time_series_datasets(); sin embargo, ésta se utiliza para generar arreglos 2D de forma (número de muestras, número de pasos de tiempo). Dado que la entrada a las capas de una RNN debe ser de forma: número de muestras, número de pasos de tiempo, número de características por paso de tiempo; procedemos con la definición de la función change_dimension_lstm() para realizar la transformación de 2D a 3D.
def change_dimension_lstm(X_train, X_val, X_test):
    """Add a trailing feature axis to 2D datasets for LSTM input.

    Each array of shape (samples, timesteps) becomes
    (samples, timesteps, 1), the (samples, timesteps, features) layout
    that Keras recurrent layers expect. Prints the resulting shapes and
    returns the three reshaped arrays in the same order.
    """
    def _to_3d(arr):
        # One feature per timestep, so the new last axis has length 1.
        return arr.reshape((arr.shape[0], arr.shape[1], 1))

    X_train_lstm = _to_3d(X_train)
    X_val_lstm = _to_3d(X_val)
    X_test_lstm = _to_3d(X_test)
    print('Shape of 3D arrays X:', X_train_lstm.shape, X_val_lstm.shape, X_test_lstm.shape)
    return X_train_lstm, X_val_lstm, X_test_lstm
Horizonte de 7 días (\(\tau=7\))#
Indexación de parámetros para cambio de dimensión de X de 2D a 3D.
# Convert the tau=7 price datasets from 2D (samples, timesteps) to the
# 3D (samples, timesteps, 1) layout required by the LSTM layers.
X_train_price_lstm_7, X_val_price_lstm_7, X_test_price_lstm_7 = change_dimension_lstm(X_train_price7, X_val_price7, X_test_price7)
Shape of 3D arrays X: (4971, 7, 1) (7, 7, 1) (7, 7, 1)
A continuación, se lleva a cabo la búsqueda de los hiperparámetros que optimizan nuestro modelo utilizando Keras y GridSearch.
from sklearn.metrics import make_scorer, mean_absolute_error
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # Suppress all TensorFlow info and warning logs.
# Define the LSTM network architecture
def create_lstm_model(optimizer, activation):
    """Build and compile the stacked-LSTM regressor used by the grid search.

    Architecture: Input of 7 timesteps with 1 feature -> LSTM(64,
    return_sequences) -> LSTM(32) -> Dropout(0.2) -> Dense(1, linear).
    Compiled with mean-absolute-error loss and the given optimizer.

    NOTE(review): `activation` is accepted so it can appear in the
    GridSearchCV param_grid, but it is never used anywhere in the
    architecture (the output layer is fixed to 'linear') — confirm
    whether it should be wired into a layer.

    Returns the compiled, untrained Keras Model.
    """
    input_layer_lstm = Input(shape=(7,1), dtype='float32')
    lstm_layer1= LSTM(64, return_sequences=True)(input_layer_lstm)
    lstm_layer2 = LSTM(32, return_sequences=False)(lstm_layer1)
    dropout_layer_lstm = Dropout(0.2)(lstm_layer2)
    output_layer_lstm = Dense(1, activation='linear')(dropout_layer_lstm)
    ts_model_lstm = Model(inputs=input_layer_lstm, outputs=output_layer_lstm)
    ts_model_lstm.compile(loss='mean_absolute_error', optimizer=optimizer)
    return ts_model_lstm
# Hyperparameter grid. The commented candidates were narrowed to a single
# combination to keep the search affordable.
param_grid = {'activation': ['sigmoid'],  # candidates: ['relu', 'tanh', 'sigmoid']
              'epochs' : [20],            # candidates: [20, 50, 100, 150]
              'optimizer': ['SGD']        # candidates: ['SGD', 'RMSprop', 'Adam']
              }
# Grid Search configuration.
# FIX: GridSearchCV maximizes the scorer, so a plain make_scorer(MAE) made it
# pick the configuration with the LARGEST error. greater_is_better=False
# negates the metric so minimizing MAE is what gets selected
# (best_score_ is therefore reported as a negative MAE).
scoring = make_scorer(mean_absolute_error, greater_is_better=False)
# NOTE(review): build_fn= is deprecated in newer scikeras/keras wrappers
# (use model=); kept for compatibility with the installed version.
model = KerasRegressor(build_fn=create_lstm_model, epochs=10, batch_size=16, verbose=0)
grid = GridSearchCV(estimator=model, param_grid=param_grid,cv=KFold(n_splits=5, shuffle=True), scoring=scoring, n_jobs = -1,verbose =2)
grid_result = grid.fit(X_train_price_lstm_7, y_train_price7)
# Grid Search results
print(f"Mejor función de activación: {grid_result.best_params_['activation']}")
print(f"Mejor número de epocas: {grid_result.best_params_['epochs']}")
print(f"Mejor Longitud de paso μ (optimizer): {grid_result.best_params_['optimizer']}")
print(f"Mejor Puntuación: {grid_result.best_score_}")
Fitting 5 folds for each of 1 candidates, totalling 5 fits
Mejor función de activación: sigmoid
Mejor número de epocas: 20
Mejor Longitud de paso μ (optimizer): SGD
Mejor Puntuación: 10350.81123214692
El modelo de predicción de series temporales se basa en una red neuronal recurrente que incluye una capa de entrada que se conecta a una capa LSTM. Esta capa LSTM considera 7 pasos temporales, equivalentes al número de datos históricos. La salida de la LSTM se genera únicamente a partir del último paso temporal. Cada uno de los n pasos temporales definidos en el shape (n,) cuenta con 32 neuronas ocultas.
La cantidad de pasos temporales (timesteps) y 1 indica el número de características (features) por cada paso temporal. Se utiliza return_sequences=True cuando es necesario enviar la secuencia completa de salidas a la capa siguiente, que puede ser otra capa recurrente, para este caso otra capa LSTM.
La salida de LSTM pasa a una capa de exclusión que elimina aleatoriamente el 20%, 40%, 60% y 80% de la entrada antes de pasar a la capa de salida, que tiene una única neurona oculta con una función de activación lineal
A continuación se define la función build_models_lstm para la generación de modelos de acuerdo a los parámetros indexados a través de la lista.
import tensorflow as tf
from tensorflow.keras.layers import Input, Dense, Dropout, LSTM
from tensorflow.keras.models import Model

def build_models_lstm(input_shape, neurons_list, dropout_rates, optimizer):
    """Build one compiled LSTM regressor per (neurons, dropout) combination.

    Parameters
    ----------
    input_shape : int
        Timesteps per sample; the model input shape is (input_shape, 1).
    neurons_list : list[int]
        Hidden sizes to try for the first LSTM layer.
    dropout_rates : list[float]
        Dropout rates to try after the recurrent stack.
    optimizer : str or keras optimizer
        Optimizer used to compile every model.

    Returns
    -------
    list of compiled (untrained) keras Models, in grid order.

    Fixes: the loop variables `neurons` and `dropout` were previously
    ignored (layers hard-coded to 64 units / 0.2 dropout, so all generated
    models were identical — visible in the identical param counts of the
    printed summaries); the `optimizer` argument was overwritten by a
    hard-coded Adam; summary() was printed twice per model.
    """
    models = []
    for neurons in neurons_list:
        for dropout in dropout_rates:
            input_layer_lstm = Input(shape=(input_shape, 1), dtype='float32')
            # First recurrent layer size comes from the grid.
            lstm_layer1 = LSTM(neurons, return_sequences=True)(input_layer_lstm)
            # Second layer keeps 32 units and emits only the last timestep.
            lstm_layer2 = LSTM(32, return_sequences=False)(lstm_layer1)
            # Dropout rate comes from the grid.
            dropout_layer_lstm = Dropout(dropout)(lstm_layer2)
            output_layer_lstm = Dense(1, activation='linear')(dropout_layer_lstm)

            ts_model_lstm = Model(inputs=input_layer_lstm, outputs=output_layer_lstm)
            # Honor the caller-supplied optimizer instead of a hard-coded Adam.
            ts_model_lstm.compile(loss='mean_absolute_error', optimizer=optimizer)

            models.append(ts_model_lstm)
            print(f'Modelo con {neurons} neuronas y dropout {dropout}:')
            ts_model_lstm.summary()
            print('-' * 40)
    return models
Indexación de parámetros de la función build_models_lstm con base en lo indicado en el numeral 3 del parcial práctico.
# Architecture grid requested in item 3 of the practical exam.
input_shape7 = 7  # timesteps per sample (tau = 7 horizon)
neurons_list = [10, 100, 1000, 10000]   # first-LSTM-layer sizes to generate
dropout_rates = [0.2, 0.4, 0.6, 0.8]    # dropout rates to generate
# Builds 4 x 4 = 16 candidate models compiled with SGD.
models_LSTM7 = build_models_lstm(input_shape7, neurons_list, dropout_rates ,'SGD')
Model: "model_69"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_70 (InputLayer) [(None, 7, 1)] 0
lstm_2 (LSTM) (None, 7, 64) 16896
lstm_3 (LSTM) (None, 32) 12416
dropout_69 (Dropout) (None, 32) 0
dense_273 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10 neuronas y dropout 0.2:
Model: "model_69"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_70 (InputLayer) [(None, 7, 1)] 0
lstm_2 (LSTM) (None, 7, 64) 16896
lstm_3 (LSTM) (None, 32) 12416
dropout_69 (Dropout) (None, 32) 0
dense_273 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_70"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_71 (InputLayer) [(None, 7, 1)] 0
lstm_4 (LSTM) (None, 7, 64) 16896
lstm_5 (LSTM) (None, 32) 12416
dropout_70 (Dropout) (None, 32) 0
dense_274 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10 neuronas y dropout 0.4:
Model: "model_70"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_71 (InputLayer) [(None, 7, 1)] 0
lstm_4 (LSTM) (None, 7, 64) 16896
lstm_5 (LSTM) (None, 32) 12416
dropout_70 (Dropout) (None, 32) 0
dense_274 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_71"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_72 (InputLayer) [(None, 7, 1)] 0
lstm_6 (LSTM) (None, 7, 64) 16896
lstm_7 (LSTM) (None, 32) 12416
dropout_71 (Dropout) (None, 32) 0
dense_275 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10 neuronas y dropout 0.6:
Model: "model_71"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_72 (InputLayer) [(None, 7, 1)] 0
lstm_6 (LSTM) (None, 7, 64) 16896
lstm_7 (LSTM) (None, 32) 12416
dropout_71 (Dropout) (None, 32) 0
dense_275 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_72"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_73 (InputLayer) [(None, 7, 1)] 0
lstm_8 (LSTM) (None, 7, 64) 16896
lstm_9 (LSTM) (None, 32) 12416
dropout_72 (Dropout) (None, 32) 0
dense_276 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10 neuronas y dropout 0.8:
Model: "model_72"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_73 (InputLayer) [(None, 7, 1)] 0
lstm_8 (LSTM) (None, 7, 64) 16896
lstm_9 (LSTM) (None, 32) 12416
dropout_72 (Dropout) (None, 32) 0
dense_276 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_73"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_74 (InputLayer) [(None, 7, 1)] 0
lstm_10 (LSTM) (None, 7, 64) 16896
lstm_11 (LSTM) (None, 32) 12416
dropout_73 (Dropout) (None, 32) 0
dense_277 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 100 neuronas y dropout 0.2:
Model: "model_73"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_74 (InputLayer) [(None, 7, 1)] 0
lstm_10 (LSTM) (None, 7, 64) 16896
lstm_11 (LSTM) (None, 32) 12416
dropout_73 (Dropout) (None, 32) 0
dense_277 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_74"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_75 (InputLayer) [(None, 7, 1)] 0
lstm_12 (LSTM) (None, 7, 64) 16896
lstm_13 (LSTM) (None, 32) 12416
dropout_74 (Dropout) (None, 32) 0
dense_278 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 100 neuronas y dropout 0.4:
Model: "model_74"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_75 (InputLayer) [(None, 7, 1)] 0
lstm_12 (LSTM) (None, 7, 64) 16896
lstm_13 (LSTM) (None, 32) 12416
dropout_74 (Dropout) (None, 32) 0
dense_278 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_75"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_76 (InputLayer) [(None, 7, 1)] 0
lstm_14 (LSTM) (None, 7, 64) 16896
lstm_15 (LSTM) (None, 32) 12416
dropout_75 (Dropout) (None, 32) 0
dense_279 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 100 neuronas y dropout 0.6:
Model: "model_75"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_76 (InputLayer) [(None, 7, 1)] 0
lstm_14 (LSTM) (None, 7, 64) 16896
lstm_15 (LSTM) (None, 32) 12416
dropout_75 (Dropout) (None, 32) 0
dense_279 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_76"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_77 (InputLayer) [(None, 7, 1)] 0
lstm_16 (LSTM) (None, 7, 64) 16896
lstm_17 (LSTM) (None, 32) 12416
dropout_76 (Dropout) (None, 32) 0
dense_280 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 100 neuronas y dropout 0.8:
Model: "model_76"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_77 (InputLayer) [(None, 7, 1)] 0
lstm_16 (LSTM) (None, 7, 64) 16896
lstm_17 (LSTM) (None, 32) 12416
dropout_76 (Dropout) (None, 32) 0
dense_280 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_77"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_78 (InputLayer) [(None, 7, 1)] 0
lstm_18 (LSTM) (None, 7, 64) 16896
lstm_19 (LSTM) (None, 32) 12416
dropout_77 (Dropout) (None, 32) 0
dense_281 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 1000 neuronas y dropout 0.2:
Model: "model_77"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_78 (InputLayer) [(None, 7, 1)] 0
lstm_18 (LSTM) (None, 7, 64) 16896
lstm_19 (LSTM) (None, 32) 12416
dropout_77 (Dropout) (None, 32) 0
dense_281 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_78"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_79 (InputLayer) [(None, 7, 1)] 0
lstm_20 (LSTM) (None, 7, 64) 16896
lstm_21 (LSTM) (None, 32) 12416
dropout_78 (Dropout) (None, 32) 0
dense_282 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 1000 neuronas y dropout 0.4:
Model: "model_78"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_79 (InputLayer) [(None, 7, 1)] 0
lstm_20 (LSTM) (None, 7, 64) 16896
lstm_21 (LSTM) (None, 32) 12416
dropout_78 (Dropout) (None, 32) 0
dense_282 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_79"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_80 (InputLayer) [(None, 7, 1)] 0
lstm_22 (LSTM) (None, 7, 64) 16896
lstm_23 (LSTM) (None, 32) 12416
dropout_79 (Dropout) (None, 32) 0
dense_283 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 1000 neuronas y dropout 0.6:
Model: "model_79"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_80 (InputLayer) [(None, 7, 1)] 0
lstm_22 (LSTM) (None, 7, 64) 16896
lstm_23 (LSTM) (None, 32) 12416
dropout_79 (Dropout) (None, 32) 0
dense_283 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_80"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_81 (InputLayer) [(None, 7, 1)] 0
lstm_24 (LSTM) (None, 7, 64) 16896
lstm_25 (LSTM) (None, 32) 12416
dropout_80 (Dropout) (None, 32) 0
dense_284 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 1000 neuronas y dropout 0.8:
Model: "model_80"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_81 (InputLayer) [(None, 7, 1)] 0
lstm_24 (LSTM) (None, 7, 64) 16896
lstm_25 (LSTM) (None, 32) 12416
dropout_80 (Dropout) (None, 32) 0
dense_284 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_81"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_82 (InputLayer) [(None, 7, 1)] 0
lstm_26 (LSTM) (None, 7, 64) 16896
lstm_27 (LSTM) (None, 32) 12416
dropout_81 (Dropout) (None, 32) 0
dense_285 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10000 neuronas y dropout 0.2:
Model: "model_81"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_82 (InputLayer) [(None, 7, 1)] 0
lstm_26 (LSTM) (None, 7, 64) 16896
lstm_27 (LSTM) (None, 32) 12416
dropout_81 (Dropout) (None, 32) 0
dense_285 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_82"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_83 (InputLayer) [(None, 7, 1)] 0
lstm_28 (LSTM) (None, 7, 64) 16896
lstm_29 (LSTM) (None, 32) 12416
dropout_82 (Dropout) (None, 32) 0
dense_286 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10000 neuronas y dropout 0.4:
Model: "model_82"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_83 (InputLayer) [(None, 7, 1)] 0
lstm_28 (LSTM) (None, 7, 64) 16896
lstm_29 (LSTM) (None, 32) 12416
dropout_82 (Dropout) (None, 32) 0
dense_286 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_83"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_84 (InputLayer) [(None, 7, 1)] 0
lstm_30 (LSTM) (None, 7, 64) 16896
lstm_31 (LSTM) (None, 32) 12416
dropout_83 (Dropout) (None, 32) 0
dense_287 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10000 neuronas y dropout 0.6:
Model: "model_83"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_84 (InputLayer) [(None, 7, 1)] 0
lstm_30 (LSTM) (None, 7, 64) 16896
lstm_31 (LSTM) (None, 32) 12416
dropout_83 (Dropout) (None, 32) 0
dense_287 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_84"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_85 (InputLayer) [(None, 7, 1)] 0
lstm_32 (LSTM) (None, 7, 64) 16896
lstm_33 (LSTM) (None, 32) 12416
dropout_84 (Dropout) (None, 32) 0
dense_288 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10000 neuronas y dropout 0.8:
Model: "model_84"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_85 (InputLayer) [(None, 7, 1)] 0
lstm_32 (LSTM) (None, 7, 64) 16896
lstm_33 (LSTM) (None, 32) 12416
dropout_84 (Dropout) (None, 32) 0
dense_288 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Para entrenar el modelo, se utiliza la función fit() en el objeto del modelo, proporcionándole como argumentos X_train y y_train. El proceso de entrenamiento se lleva a cabo a lo largo de un número específico de épocas. Además, el parámetro batch_size especifica cuántas muestras del conjunto de entrenamiento se usarán en cada iteración de retropropagación (backpropagation).
El conjunto de datos de validación se utiliza para evaluar el rendimiento del modelo al finalizar cada época. Se emplea un objeto ModelCheckpoint que monitorea la función de pérdida en el conjunto de validación y almacena el modelo correspondiente a la época en la que esta función alcanza su valor más bajo.
Se define val_loss como el valor de la función de coste para los datos de validación cruzada y loss como el valor de la función de coste para los datos de entrenamiento. period=1 indica que el modelo se evaluará y potencialmente se guardará después de cada época.
from tensorflow.keras.callbacks import ModelCheckpoint

# Checkpoint path template: the epoch number and validation loss are embedded
# in the file name so the best run can later be recovered from disk by name.
save_weights_at = os.path.join('keras_models', 'PRSA_data_price_LSTM_weights.{epoch:02d}-{val_loss:.4f}.keras')
# Evaluated once per epoch; keeps only checkpoints that improve (minimize)
# val_loss, saving the full model (not just weights).
save_best7_lstm = ModelCheckpoint(save_weights_at, monitor='val_loss', verbose=2,
                                  save_best_only=True, save_weights_only=False, mode='min', save_freq='epoch');
A continuación se itera sobre cada uno de los modelos del objeto models_LSTM7.
import os
from joblib import dump, load

history_price_LSTM7 = []
# Train each candidate model once; each history is cached on disk so
# re-running the notebook skips already-completed fits.
# FIX: both messages previously printed the literal string '(unknown)'
# because the {filename} placeholder had been lost from the f-strings.
for i, model in enumerate(models_LSTM7):
    filename = f'history_price_LSTM_model_{i}.joblib'
    if os.path.exists(filename):
        # Cached run: load the stored history dict instead of retraining.
        model_history = load(filename)
        print(f"El archivo '{filename}' ya existe. Se ha cargado el historial del entrenamiento.")
    else:
        model_history = model.fit(x=X_train_price_lstm_7, y=y_train_price7, batch_size=16, epochs=20,
                                  verbose=2, callbacks=[save_best7_lstm], validation_data=(X_val_price_lstm_7, y_val_price7),
                                  shuffle=True)
        dump(model_history.history, filename)
        print(f"El entrenamiento del modelo {i + 1} se ha completado y el historial ha sido guardado en '{filename}'.")
    # Loaded caches are plain dicts; fresh fits return a History object —
    # normalize so the list always holds history dicts.
    history_price_LSTM7.append(model_history if isinstance(model_history, dict) else model_history.history)
El archivo 'history_price_LSTM_model_0.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_price_LSTM_model_1.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_price_LSTM_model_2.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_price_LSTM_model_3.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_price_LSTM_model_4.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_price_LSTM_model_5.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_price_LSTM_model_6.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_price_LSTM_model_7.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_price_LSTM_model_8.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_price_LSTM_model_9.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_price_LSTM_model_10.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_price_LSTM_model_11.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_price_LSTM_model_12.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_price_LSTM_model_13.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_price_LSTM_model_14.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_price_LSTM_model_15.joblib' ya existe. Se ha cargado el historial del entrenamiento.
En este caso, el parámetro verbose=2 indica que se mostrará una línea por cada época durante el entrenamiento. Los valores posibles son: 0 para no mostrar información, 1 para visualizar una barra de progreso y 2 para ver un registro por época.
Además, las muestras se mezclan de manera aleatoria antes de cada época, gracias a la opción shuffle=True.
Una vez completado el entrenamiento, se realizan predicciones sobre el Precio del Bitcoin utilizando los mejores modelos guardados. Las predicciones generadas, que corresponden al Precio del Bitcoin escalado, son transformadas inversamente para recuperar los valores originales. Asimismo, se evalúa la calidad del ajuste mediante el uso de métricas de medición como SSE, MAPE, MAD, MSD y R2.
import os
import re
from tensorflow.keras.models import load_model
import numpy as np

# Scan the checkpoint directory and load the model whose file name encodes
# the lowest validation loss (see the ModelCheckpoint path template).
model_dir = 'keras_models'
files = os.listdir(model_dir)
# Group 1 = epoch, group 2 = val_loss, both embedded by the checkpoint callback.
pattern = r"PRSA_data_price_LSTM_weights\.(\d+)-([\d\.]+)\.keras"

best_val_loss = float('inf')
best_model_file = None
best7_lstm = None

for file in files:
    match = re.match(pattern, file)
    if match:
        # Only the val_loss matters for ranking; the epoch (group 1) is
        # informational and already part of the file name.
        val_loss = float(match.group(2))
        if val_loss < best_val_loss:
            best_val_loss = val_loss
            best_model_file = file

# Load the best model, if any file matched the pattern.
if best_model_file:
    best_model_path = os.path.join(model_dir, best_model_file)
    print(f"Cargando el mejor modelo: {best_model_file} con val_loss: {best_val_loss}")
    best7_lstm = load_model(best_model_path)
    if best7_lstm is None:
        print("Error: No se pudo cargar el mejor modelo.")
else:
    print("No se encontraron archivos de modelos que coincidan con el patrón.")
Cargando el mejor modelo: PRSA_data_price_LSTM_weights.20-67005.9609.keras con val_loss: 67005.9609
A continuación estimamos las predicciones del modelo LSTM para la época en donde la función de pérdida alcanza su valor más bajo.
# List every file found in the checkpoint directory (inspected above).
print("Archivos en el directorio 'keras_models':", files)
Archivos en el directorio 'keras_models': ['PRSA_data_vola14_MLP7_weights.04-0.0018.keras', 'PRSA_data_price_MLP21_weights.01-3579.4128.keras', 'PRSA_data_price_LSTM21_weights.01-43062.3047.keras', 'PRSA_data_price_LSTM28_weights.02-42892.2305.keras', 'PRSA_data_price_LSTM_weights.19-67016.3047.keras', 'PRSA_data_at_LSTM28_weights.19-0.0651.keras', 'PRSA_data_vola14_MLP21_weights.20-0.0011.keras', 'PRSA_data_price_LSTM14_weights.06-51394.8750.keras', 'PRSA_data_vola21_LSTM7_weights.04-0.0012.keras', 'PRSA_data_vola7_MLP28_weights.07-0.0033.keras', 'PRSA_data_price_LSTM21_weights.02-43052.0469.keras', 'PRSA_data_at_MLP7_weights.80-0.0780.keras', 'PRSA_data_vola21_LSTM28_weights.02-0.0022.keras', 'PRSA_data_vola21_LSTM14_weights.04-0.0011.keras', 'PRSA_data_vola7_LSTM21_weights.18-0.0029.keras', 'PRSA_data_vola21_LSTM7_weights.08-0.0013.keras', 'PRSA_data_vola28_MLP14_weights.04-0.0013.keras', 'PRSA_data_price_MLP14_weights.52-626.2623.keras', 'PRSA_data_vola7_MLP7_weights.15-0.0041.keras', 'PRSA_data_price_LSTM14_weights.13-51325.8516.keras', 'PRSA_data_vola14_MLP7_weights.02-0.0023.keras', 'PRSA_data_price_LSTM21_weights.11-42962.6562.keras', 'PRSA_data_vola7_LSTM28_weights.10-0.0037.keras', 'PRSA_data_at_LSTM_weights.04-1.2071.keras', 'PRSA_data_price_MLP7_weights.50-1342.5273.keras', 'PRSA_data_price_MLP14_weights.03-5108.7837.keras', 'PRSA_data_vola28_MLP7_weights.14-0.0012.keras', 'PRSA_data_at_MLP7_weights.11-2.4883.keras', 'PRSA_data_price_LSTM28_weights.08-42832.5430.keras', 'PRSA_data_price_MLP28_weights.01-9242.6553.keras', 'PRSA_data_vola21_LSTM28_weights.04-0.0019.keras', 'PRSA_data_price_MLP21_weights.16-1013.2251.keras', 'PRSA_data_price_MLP28_weights.13-1042.5178.keras', 'PRSA_data_vola28_MLP28_weights.34-0.0006.keras', 'PRSA_data_at_MLP7_weights.01-19.6635.keras', 'PRSA_data_at_MLP7_weights.19-1.8379.keras', 'PRSA_data_at_MLP7_weights.12-2.3991.keras', 'PRSA_data_price_MLP7_weights.15-1346.3275.keras', 
'PRSA_data_vola14_MLP28_weights.02-0.0024.keras', 'PRSA_data_at_LSTM21_weights.05-0.7513.keras', 'PRSA_data_vola14_MLP28_weights.07-0.0017.keras', 'PRSA_data_at_MLP28_weights.05-0.0930.keras', 'PRSA_data_at_LSTM28_weights.02-2.0782.keras', 'PRSA_data_price_LSTM14_weights.20-51256.7266.keras', 'PRSA_data_vola21_LSTM21_weights.05-0.0013.keras', 'PRSA_data_price_MLP14_weights.12-1304.7009.keras', 'PRSA_data_vola7_LSTM14_weights.09-0.0044.keras', 'PRSA_data_vola28_MLP28_weights.50-0.0006.keras', 'PRSA_data_vola7_MLP14_weights.15-0.0020.keras', 'PRSA_data_price_MLP14_weights.08-1386.5365.keras', 'PRSA_data_price_LSTM21_weights.09-42982.2383.keras', 'PRSA_data_price_MLP28_weights.11-7366.8994.keras', 'PRSA_data_vola14_MLP7_weights.18-0.0015.keras', 'PRSA_data_price_MLP7_weights.14-1734.7238.keras', 'PRSA_data_at_LSTM28_weights.03-1.3148.keras', 'PRSA_data_price_LSTM14_weights.15-51306.2070.keras', 'PRSA_data_vola21_MLP28_weights.25-0.0010.keras', 'PRSA_data_at_LSTM28_weights.10-0.2820.keras', 'PRSA_data_at_LSTM14_weights.02-2.2175.keras', 'PRSA_data_vola28_LSTM7_weights.08-0.0014.keras', 'PRSA_data_price_MLP28_weights.10-1107.4047.keras', 'PRSA_data_vola28_MLP7_weights.01-0.0068.keras', 'PRSA_data_vola21_LSTM21_weights.03-0.0015.keras', 'PRSA_data_price_MLP7_weights.01-9253.3047.keras', 'PRSA_data_vola21_MLP14_weights.31-0.0007.keras', 'PRSA_data_price_LSTM28_weights.12-42793.6172.keras', 'PRSA_data_price_LSTM14_weights.09-51365.0000.keras', 'PRSA_data_vola28_MLP28_weights.42-0.0006.keras', 'PRSA_data_price_MLP7_weights.05-4792.5054.keras', 'PRSA_data_vola21_LSTM_weights.01-0.0024.keras', 'PRSA_data_price_LSTM28_weights.17-42745.0352.keras', 'PRSA_data_vola28_MLP28_weights.45-0.0006.keras', 'PRSA_data_vola28_MLP14_weights.08-0.0010.keras', 'PRSA_data_at_LSTM_weights.11-0.6498.keras', 'PRSA_data_vola28_MLP28_weights.39-0.0007.keras', 'PRSA_data_at_MLP21_weights.11-0.0326.keras', 'PRSA_data_price_LSTM_weights.20-67006.4141.keras', 
'PRSA_data_vola28_MLP7_weights.06-0.0014.keras', 'PRSA_data_at_LSTM14_weights.06-0.8145.keras', 'PRSA_data_at_MLP28_weights.01-2.1902.keras', 'PRSA_data_price_MLP14_weights.46-814.1992.keras', '.DS_Store', 'PRSA_data_at_LSTM21_weights.01-5.9452.keras', 'PRSA_data_vola28_LSTM28_weights.12-0.0006.keras', 'PRSA_data_price_LSTM14_weights.18-51276.5625.keras', 'PRSA_data_vola21_LSTM14_weights.01-0.0078.keras', 'PRSA_data_vola14_MLP21_weights.04-0.0015.keras', 'PRSA_data_vola21_LSTM7_weights.02-0.0012.keras', 'PRSA_data_vola21_LSTM14_weights.02-0.0011.keras', 'PRSA_data_at_MLP7_weights.09-2.9793.keras', 'PRSA_data_vola21_MLP14_weights.06-0.0007.keras', 'PRSA_data_vola14_MLP21_weights.78-0.0010.keras', 'PRSA_data_price_MLP14_weights.64-711.9107.keras', 'PRSA_data_vola14_MLP28_weights.31-0.0011.keras', 'PRSA_data_vola14_LSTM_weights.08-0.0020.keras', 'PRSA_data_vola14_LSTM21_weights.02-0.0018.keras', 'PRSA_data_at_LSTM21_weights.02-2.0083.keras', 'PRSA_data_vola14_LSTM28_weights.17-0.0014.keras', 'PRSA_data_vola14_MLP14_weights.29-0.0005.keras', 'PRSA_data_price_LSTM_weights.03-67175.7734.keras', 'PRSA_data_vola14_LSTM_weights.01-0.0068.keras', 'PRSA_data_price_LSTM28_weights.11-42803.3086.keras', 'PRSA_data_price_MLP28_weights.15-7107.3306.keras', 'PRSA_data_vola14_MLP21_weights.49-0.0010.keras', 'PRSA_data_vola28_MLP7_weights.07-0.0010.keras', 'PRSA_data_vola21_LSTM7_weights.02-0.0010.keras', 'PRSA_data_at_LSTM14_weights.15-0.4082.keras', 'PRSA_data_vola28_MLP7_weights.19-0.0011.keras', 'PRSA_data_at_LSTM21_weights.19-0.3670.keras', 'PRSA_data_vola14_MLP14_weights.06-0.0006.keras', 'PRSA_data_vola14_LSTM21_weights.06-0.0012.keras', 'PRSA_data_price_LSTM_weights.17-67036.2891.keras', 'PRSA_data_vola28_MLP7_weights.03-0.0018.keras', 'PRSA_data_vola7_MLP28_weights.02-0.0039.keras', 'PRSA_data_vola7_LSTM28_weights.16-0.0035.keras', 'PRSA_data_vola21_MLP7_weights.20-0.0011.keras', 'PRSA_data_vola21_LSTM7_weights.01-0.0018.keras', 
'PRSA_data_vola21_MLP21_weights.33-0.0009.keras', 'PRSA_data_price_MLP21_weights.64-870.7020.keras', 'PRSA_data_vola21_MLP14_weights.22-0.0007.keras', 'PRSA_data_vola7_MLP14_weights.14-0.0020.keras', 'PRSA_data_price_LSTM21_weights.17-42903.9648.keras', 'PRSA_data_vola21_MLP14_weights.01-0.0021.keras', 'PRSA_data_vola21_LSTM7_weights.16-0.0011.keras', 'PRSA_data_at_LSTM21_weights.17-0.5070.keras', 'PRSA_data_at_LSTM14_weights.11-0.6656.keras', 'PRSA_data_vola28_MLP7_weights.02-0.0019.keras', 'PRSA_data_vola14_MLP28_weights.05-0.0018.keras', 'PRSA_data_vola14_LSTM14_weights.08-0.0005.keras', 'PRSA_data_at_MLP21_weights.02-0.8171.keras', 'PRSA_data_price_MLP28_weights.02-8134.2202.keras', 'PRSA_data_price_LSTM28_weights.06-42852.3320.keras', 'PRSA_data_vola7_MLP14_weights.02-0.0036.keras', 'PRSA_data_vola14_LSTM28_weights.01-0.0026.keras', 'PRSA_data_at_MLP7_weights.04-9.8837.keras', 'PRSA_data_price_MLP28_weights.16-1274.4368.keras', 'PRSA_data_price_LSTM21_weights.07-43002.0078.keras', 'PRSA_data_vola7_MLP28_weights.23-0.0032.keras', 'PRSA_data_price_LSTM_weights.01-67196.3359.keras', 'PRSA_data_at_MLP28_weights.02-1.9156.keras', 'PRSA_data_vola14_MLP21_weights.02-0.0016.keras', 'PRSA_data_vola28_MLP14_weights.14-0.0007.keras', 'PRSA_data_price_LSTM28_weights.07-42842.4375.keras', 'PRSA_data_price_MLP14_weights.37-825.1805.keras', 'PRSA_data_vola7_MLP14_weights.07-0.0021.keras', 'PRSA_data_price_LSTM14_weights.04-51414.8945.keras', 'PRSA_data_price_MLP7_weights.42-1446.1339.keras', 'PRSA_data_price_MLP21_weights.57-932.6497.keras', 'PRSA_data_price_LSTM14_weights.19-51266.6133.keras', 'PRSA_data_vola21_MLP14_weights.34-0.0008.keras', 'PRSA_data_price_MLP14_weights.01-10485.8262.keras', 'PRSA_data_vola21_LSTM7_weights.08-0.0012.keras', 'PRSA_data_at_LSTM_weights.02-2.4966.keras', 'PRSA_data_at_LSTM28_weights.08-0.3981.keras', 'PRSA_data_vola21_LSTM28_weights.15-0.0009.keras', 'PRSA_data_price_LSTM_weights.20-67006.3047.keras', 
'PRSA_data_price_LSTM_weights.14-67066.1016.keras', 'PRSA_data_price_LSTM21_weights.19-42884.2422.keras', 'PRSA_data_vola28_LSTM21_weights.18-0.0008.keras', 'PRSA_data_vola21_LSTM7_weights.04-0.0013.keras', 'PRSA_data_price_LSTM28_weights.20-42714.5508.keras', 'PRSA_data_at_LSTM14_weights.13-0.5609.keras', 'PRSA_data_vola28_LSTM28_weights.04-0.0009.keras', 'PRSA_data_vola14_MLP21_weights.01-0.0118.keras', 'PRSA_data_vola7_MLP21_weights.02-0.0032.keras', 'PRSA_data_at_MLP7_weights.74-0.2838.keras', 'PRSA_data_price_MLP28_weights.17-1217.0461.keras', 'PRSA_data_at_MLP21_weights.16-1.1973.keras', 'PRSA_data_vola7_MLP7_weights.02-0.0036.keras', 'PRSA_data_vola21_LSTM7_weights.02-0.0015.keras', 'PRSA_data_vola14_LSTM14_weights.04-0.0006.keras', 'PRSA_data_vola28_LSTM28_weights.18-0.0006.keras', 'PRSA_data_price_MLP21_weights.74-805.9489.keras', 'PRSA_data_vola21_MLP14_weights.02-0.0011.keras', 'PRSA_data_vola28_MLP21_weights.02-0.0012.keras', 'PRSA_data_vola21_LSTM_weights.11-0.0014.keras', 'PRSA_data_vola7_LSTM14_weights.14-0.0036.keras', 'PRSA_data_at_MLP28_weights.03-1.7712.keras', 'PRSA_data_price_LSTM21_weights.20-42874.3555.keras', 'PRSA_data_vola28_LSTM7_weights.20-0.0010.keras', 'PRSA_data_vola28_MLP21_weights.04-0.0009.keras', 'PRSA_data_at_MLP7_weights.08-3.6716.keras', 'PRSA_data_at_MLP21_weights.04-0.0734.keras', 'PRSA_data_vola21_LSTM28_weights.03-0.0021.keras', 'PRSA_data_vola21_MLP14_weights.35-0.0008.keras', 'PRSA_data_vola28_LSTM7_weights.11-0.0012.keras', 'PRSA_data_price_LSTM28_weights.04-42872.1953.keras', 'PRSA_data_at_LSTM_weights.08-0.4803.keras', 'PRSA_data_vola28_MLP14_weights.05-0.0010.keras', 'PRSA_data_price_LSTM14_weights.14-51316.0625.keras', 'PRSA_data_vola7_MLP14_weights.01-0.0101.keras', 'PRSA_data_vola14_LSTM28_weights.17-0.0015.keras', 'PRSA_data_vola7_LSTM_weights.01-0.0040.keras', 'PRSA_data_vola14_MLP21_weights.09-0.0011.keras', 'PRSA_data_price_LSTM21_weights.05-43021.9570.keras', 'PRSA_data_vola28_MLP28_weights.03-0.0007.keras', 
'PRSA_data_vola7_LSTM14_weights.10-0.0042.keras', 'PRSA_data_price_LSTM_weights.07-67135.3359.keras', 'PRSA_data_vola7_MLP14_weights.18-0.0020.keras', 'PRSA_data_price_LSTM21_weights.18-42894.1055.keras', 'PRSA_data_vola7_MLP7_weights.01-0.0042.keras', 'PRSA_data_vola14_LSTM_weights.05-0.0024.keras', 'PRSA_data_vola21_MLP14_weights.27-0.0008.keras', 'PRSA_data_vola28_MLP28_weights.02-0.0018.keras', 'PRSA_data_vola21_LSTM7_weights.03-0.0017.keras', 'PRSA_data_price_LSTM_weights.10-67105.4297.keras', 'PRSA_data_vola28_MLP7_weights.19-0.0010.keras', 'PRSA_data_vola7_MLP21_weights.03-0.0030.keras', 'PRSA_data_at_MLP7_weights.06-5.3724.keras', 'PRSA_data_at_LSTM_weights.05-1.1226.keras', 'PRSA_data_vola21_LSTM7_weights.01-0.0026.keras', 'PRSA_data_vola21_LSTM28_weights.05-0.0018.keras', 'PRSA_data_vola14_MLP21_weights.05-0.0012.keras', 'PRSA_data_vola21_LSTM28_weights.14-0.0010.keras', 'PRSA_data_vola21_MLP14_weights.08-0.0010.keras', 'PRSA_data_vola21_MLP28_weights.47-0.0008.keras', 'PRSA_data_price_LSTM14_weights.16-51296.3633.keras', 'PRSA_data_at_MLP7_weights.02-16.3421.keras', 'PRSA_data_vola7_LSTM21_weights.15-0.0030.keras', 'PRSA_data_vola21_MLP21_weights.09-0.0009.keras', 'PRSA_data_at_LSTM14_weights.01-6.2168.keras', 'PRSA_data_vola28_LSTM21_weights.01-0.0016.keras', 'PRSA_data_vola21_LSTM21_weights.16-0.0009.keras', 'PRSA_data_vola21_LSTM28_weights.07-0.0016.keras', 'PRSA_data_vola21_MLP28_weights.19-0.0010.keras', 'PRSA_data_vola21_LSTM28_weights.14-0.0009.keras', 'PRSA_data_vola14_MLP7_weights.09-0.0018.keras', 'PRSA_data_vola21_MLP21_weights.02-0.0011.keras', 'PRSA_data_vola28_LSTM14_weights.03-0.0007.keras', 'PRSA_data_vola21_LSTM_weights.09-0.0015.keras', 'PRSA_data_vola21_LSTM7_weights.09-0.0012.keras', 'PRSA_data_vola28_MLP21_weights.65-0.0007.keras', 'PRSA_data_vola21_MLP28_weights.15-0.0011.keras', 'PRSA_data_at_LSTM28_weights.04-0.7851.keras', 'PRSA_data_at_MLP7_weights.42-0.8694.keras', 'PRSA_data_vola21_LSTM28_weights.18-0.0008.keras', 
'PRSA_data_vola21_MLP14_weights.04-0.0008.keras', 'PRSA_data_vola7_LSTM14_weights.02-0.0040.keras', 'PRSA_data_vola21_LSTM14_weights.12-0.0007.keras', 'PRSA_data_vola7_LSTM21_weights.15-0.0029.keras', 'PRSA_data_vola21_MLP28_weights.01-0.0026.keras', 'PRSA_data_price_LSTM_weights.13-67075.9453.keras', 'PRSA_data_vola14_MLP7_weights.01-0.0041.keras', 'PRSA_data_price_MLP21_weights.02-1476.1034.keras', 'PRSA_data_vola14_LSTM28_weights.19-0.0014.keras', 'PRSA_data_price_MLP14_weights.96-663.0834.keras', 'PRSA_data_vola7_MLP21_weights.94-0.0024.keras', 'PRSA_data_price_LSTM21_weights.03-43041.9570.keras', 'PRSA_data_price_MLP7_weights.12-3798.9531.keras', 'PRSA_data_price_LSTM28_weights.10-42813.0352.keras', 'PRSA_data_vola14_MLP28_weights.20-0.0014.keras', 'PRSA_data_vola7_MLP21_weights.04-0.0027.keras', 'PRSA_data_vola7_LSTM28_weights.18-0.0035.keras', 'PRSA_data_at_MLP7_weights.10-2.7632.keras', 'PRSA_data_at_LSTM21_weights.18-0.3561.keras', 'PRSA_data_price_LSTM_weights.06-67145.4141.keras', 'PRSA_data_price_LSTM28_weights.13-42783.9805.keras', 'PRSA_data_vola21_MLP7_weights.13-0.0012.keras', 'PRSA_data_price_LSTM28_weights.01-42902.4570.keras', 'PRSA_data_price_LSTM_weights.20-67005.9609.keras', 'PRSA_data_vola7_LSTM21_weights.04-0.0034.keras', 'PRSA_data_at_MLP14_weights.03-0.0461.keras', 'PRSA_data_at_LSTM21_weights.07-0.5113.keras', 'PRSA_data_vola28_MLP28_weights.01-0.0042.keras', 'PRSA_data_vola7_LSTM14_weights.01-0.0455.keras', 'PRSA_data_price_MLP28_weights.17-1182.9105.keras', 'PRSA_data_vola7_MLP14_weights.05-0.0018.keras', 'PRSA_data_vola28_MLP14_weights.38-0.0006.keras', 'PRSA_data_vola28_MLP7_weights.10-0.0013.keras', 'PRSA_data_vola7_LSTM21_weights.01-0.0038.keras', 'PRSA_data_price_LSTM14_weights.17-51286.4570.keras', 'PRSA_data_price_LSTM28_weights.16-42754.8008.keras', 'PRSA_data_price_LSTM14_weights.11-51345.4023.keras', 'PRSA_data_price_MLP14_weights.22-638.4409.keras', 'PRSA_data_vola7_MLP28_weights.32-0.0029.keras', 
'PRSA_data_price_MLP7_weights.48-1648.1864.keras', 'PRSA_data_vola28_LSTM28_weights.10-0.0007.keras', 'PRSA_data_vola21_LSTM_weights.03-0.0021.keras', 'PRSA_data_vola21_LSTM21_weights.13-0.0010.keras', 'PRSA_data_at_MLP7_weights.07-4.2627.keras', 'PRSA_data_vola7_LSTM21_weights.17-0.0029.keras', 'PRSA_data_vola21_LSTM28_weights.08-0.0011.keras', 'PRSA_data_price_MLP21_weights.05-1072.5081.keras', 'PRSA_data_at_LSTM21_weights.06-0.6219.keras', 'PRSA_data_vola7_LSTM21_weights.05-0.0030.keras', 'PRSA_data_price_MLP14_weights.29-856.8320.keras', 'PRSA_data_vola14_LSTM21_weights.11-0.0011.keras', 'PRSA_data_price_LSTM21_weights.10-42972.4219.keras', 'PRSA_data_at_LSTM21_weights.19-0.2744.keras', 'PRSA_data_price_LSTM_weights.02-67185.9453.keras', 'PRSA_data_vola28_MLP14_weights.21-0.0007.keras', 'PRSA_data_vola21_LSTM28_weights.16-0.0009.keras', 'PRSA_data_vola28_LSTM21_weights.10-0.0009.keras', 'PRSA_data_price_LSTM28_weights.15-42764.5898.keras', 'PRSA_data_at_LSTM14_weights.07-0.7059.keras', 'PRSA_data_vola21_LSTM21_weights.01-0.0048.keras', 'PRSA_data_at_LSTM14_weights.03-1.3422.keras', 'PRSA_data_vola14_LSTM14_weights.02-0.0015.keras', 'PRSA_data_vola14_MLP14_weights.02-0.0007.keras', 'PRSA_data_vola7_LSTM21_weights.02-0.0034.keras', 'PRSA_data_vola7_MLP21_weights.21-0.0025.keras', 'PRSA_data_at_MLP7_weights.18-1.8396.keras', 'PRSA_data_price_LSTM_weights.11-67095.6172.keras', 'PRSA_data_at_LSTM14_weights.12-0.6378.keras', 'PRSA_data_vola21_LSTM21_weights.07-0.0012.keras', 'PRSA_data_vola7_MLP28_weights.45-0.0030.keras', 'PRSA_data_at_MLP7_weights.26-0.8897.keras', 'PRSA_data_vola21_MLP7_weights.01-0.0014.keras', 'PRSA_data_price_MLP14_weights.19-982.5739.keras', 'PRSA_data_vola14_LSTM_weights.16-0.0017.keras', 'PRSA_data_price_MLP21_weights.94-813.8954.keras', 'PRSA_data_vola7_LSTM21_weights.20-0.0029.keras', 'PRSA_data_at_LSTM_weights.03-1.4842.keras', 'PRSA_data_vola21_MLP28_weights.02-0.0015.keras', 'PRSA_data_price_LSTM21_weights.12-42952.9453.keras', 
'PRSA_data_price_MLP28_weights.17-1019.2816.keras', 'PRSA_data_vola14_LSTM_weights.13-0.0019.keras', 'PRSA_data_vola7_LSTM_weights.02-0.0038.keras', 'PRSA_data_vola7_LSTM28_weights.02-0.0042.keras', 'PRSA_data_at_LSTM28_weights.12-0.0705.keras', 'PRSA_data_at_LSTM14_weights.13-0.2270.keras', 'PRSA_data_price_LSTM_weights.18-67026.2891.keras', 'PRSA_data_vola28_MLP21_weights.66-0.0007.keras', 'PRSA_data_at_MLP14_weights.42-0.0373.keras', 'PRSA_data_vola7_MLP14_weights.38-0.0020.keras', 'PRSA_data_vola7_LSTM21_weights.11-0.0029.keras', 'PRSA_data_vola21_LSTM21_weights.12-0.0010.keras', 'PRSA_data_at_LSTM_weights.14-0.5158.keras', 'PRSA_data_vola14_LSTM21_weights.01-0.0019.keras', 'PRSA_data_vola28_LSTM21_weights.11-0.0009.keras', 'PRSA_data_vola7_LSTM28_weights.05-0.0038.keras', 'PRSA_data_vola14_MLP14_weights.10-0.0005.keras', 'PRSA_data_at_MLP21_weights.03-2.1540.keras', 'PRSA_data_at_LSTM_weights.16-0.5377.keras', 'PRSA_data_vola21_LSTM7_weights.13-0.0013.keras', 'PRSA_data_vola21_MLP28_weights.44-0.0008.keras', 'PRSA_data_at_MLP7_weights.16-1.9941.keras', 'PRSA_data_vola28_LSTM28_weights.01-0.0010.keras', 'PRSA_data_vola28_LSTM14_weights.19-0.0006.keras', 'PRSA_data_price_LSTM21_weights.14-42933.4180.keras', 'PRSA_data_vola14_LSTM28_weights.04-0.0019.keras', 'PRSA_data_price_MLP21_weights.82-828.7976.keras', 'PRSA_data_vola28_MLP21_weights.01-0.0030.keras', 'PRSA_data_price_LSTM14_weights.10-51355.2070.keras', 'PRSA_data_vola21_MLP21_weights.18-0.0010.keras', 'PRSA_data_vola28_MLP21_weights.73-0.0007.keras', 'PRSA_data_vola28_MLP7_weights.20-0.0011.keras', 'PRSA_data_price_LSTM21_weights.13-42943.1719.keras', 'PRSA_data_vola28_MLP28_weights.36-0.0007.keras', 'PRSA_data_vola7_MLP14_weights.04-0.0025.keras', 'PRSA_data_at_MLP14_weights.02-0.2715.keras', 'PRSA_data_price_LSTM14_weights.05-51404.8945.keras', 'PRSA_data_price_LSTM28_weights.19-42725.4297.keras', 'PRSA_data_price_MLP28_weights.01-1561.6656.keras', 'PRSA_data_vola21_MLP7_weights.14-0.0011.keras', 
'PRSA_data_vola7_MLP7_weights.23-0.0033.keras', 'PRSA_data_vola14_MLP28_weights.01-0.0026.keras', 'PRSA_data_at_MLP14_weights.29-0.0323.keras', 'PRSA_data_vola7_MLP21_weights.01-0.0033.keras', 'PRSA_data_vola21_LSTM_weights.19-0.0014.keras', 'PRSA_data_price_MLP7_weights.03-5953.8076.keras', 'PRSA_data_price_MLP21_weights.42-948.6628.keras', 'PRSA_data_vola21_LSTM14_weights.07-0.0008.keras', 'PRSA_data_vola14_LSTM28_weights.05-0.0018.keras', 'PRSA_data_vola7_LSTM28_weights.13-0.0037.keras', 'PRSA_data_vola21_MLP21_weights.01-0.0016.keras', 'PRSA_data_vola14_LSTM21_weights.08-0.0011.keras', 'PRSA_data_price_LSTM21_weights.06-43011.9570.keras', 'PRSA_data_vola7_MLP14_weights.05-0.0019.keras', 'PRSA_data_vola7_MLP28_weights.16-0.0033.keras', 'PRSA_data_vola21_LSTM28_weights.16-0.0008.keras', 'PRSA_data_vola28_MLP14_weights.01-0.0031.keras', 'PRSA_data_vola14_MLP28_weights.11-0.0015.keras', 'PRSA_data_price_LSTM14_weights.03-51425.0000.keras', 'PRSA_data_vola7_LSTM28_weights.04-0.0039.keras', 'PRSA_data_vola28_LSTM7_weights.13-0.0011.keras', 'PRSA_data_price_LSTM14_weights.01-51445.4883.keras', 'PRSA_data_vola28_MLP28_weights.22-0.0006.keras', 'PRSA_data_price_LSTM_weights.09-67115.3359.keras', 'PRSA_data_vola28_LSTM7_weights.14-0.0011.keras', 'PRSA_data_vola7_LSTM14_weights.04-0.0050.keras', 'PRSA_data_vola21_MLP28_weights.17-0.0010.keras', 'PRSA_data_vola28_LSTM21_weights.09-0.0010.keras', 'PRSA_data_vola21_MLP14_weights.13-0.0010.keras', 'PRSA_data_vola14_MLP14_weights.01-0.0013.keras', 'PRSA_data_vola21_MLP28_weights.50-0.0009.keras', 'PRSA_data_at_LSTM_weights.19-0.2184.keras', 'PRSA_data_vola14_LSTM_weights.02-0.0030.keras', 'PRSA_data_vola7_MLP28_weights.39-0.0030.keras', 'PRSA_data_price_LSTM21_weights.04-43031.9414.keras', 'PRSA_data_price_LSTM_weights.15-67056.2109.keras', 'PRSA_data_at_MLP7_weights.20-1.6814.keras', 'PRSA_data_vola28_LSTM7_weights.18-0.0010.keras', 'PRSA_data_at_MLP7_weights.95-0.2130.keras', 
'PRSA_data_vola14_LSTM28_weights.03-0.0021.keras', 'PRSA_data_at_LSTM_weights.01-7.5653.keras', 'PRSA_data_at_MLP7_weights.05-7.2110.keras', 'PRSA_data_vola21_LSTM28_weights.20-0.0008.keras', 'PRSA_data_at_LSTM_weights.07-0.8569.keras', 'PRSA_data_vola14_LSTM28_weights.12-0.0014.keras', 'PRSA_data_price_LSTM21_weights.16-42913.7969.keras', 'PRSA_data_at_MLP28_weights.04-1.3772.keras', 'PRSA_data_price_LSTM_weights.08-67125.3047.keras', 'PRSA_data_price_LSTM14_weights.07-51384.8633.keras', 'PRSA_data_price_LSTM_weights.04-67165.6016.keras', 'PRSA_data_vola14_LSTM28_weights.15-0.0014.keras', 'PRSA_data_at_MLP14_weights.01-0.4195.keras', 'PRSA_data_price_LSTM28_weights.09-42822.7578.keras', 'PRSA_data_price_LSTM28_weights.03-42882.1953.keras', 'PRSA_data_at_LSTM14_weights.05-0.8408.keras', 'PRSA_data_vola7_MLP28_weights.08-0.0030.keras', 'PRSA_data_price_MLP14_weights.07-3004.8474.keras', 'PRSA_data_price_LSTM21_weights.15-42923.6367.keras', 'PRSA_data_vola7_MLP21_weights.81-0.0025.keras', 'PRSA_data_price_MLP21_weights.86-819.7106.keras', 'PRSA_data_vola28_MLP7_weights.10-0.0010.keras', 'PRSA_data_vola21_MLP28_weights.05-0.0012.keras', 'PRSA_data_vola21_MLP14_weights.20-0.0007.keras', 'PRSA_data_vola14_MLP28_weights.42-0.0011.keras', 'PRSA_data_price_LSTM_weights.12-67085.7578.keras', 'PRSA_data_vola28_MLP21_weights.13-0.0008.keras', 'PRSA_data_vola14_MLP28_weights.08-0.0016.keras', 'PRSA_data_at_LSTM28_weights.05-0.5407.keras', 'PRSA_data_at_MLP7_weights.86-0.0883.keras', 'PRSA_data_vola28_LSTM14_weights.01-0.0020.keras', 'PRSA_data_vola21_MLP21_weights.71-0.0010.keras', 'PRSA_data_at_MLP28_weights.09-0.1537.keras', 'PRSA_data_vola7_MLP28_weights.48-0.0030.keras', 'PRSA_data_at_MLP28_weights.12-0.0354.keras', 'PRSA_data_at_LSTM14_weights.04-0.9732.keras', 'PRSA_data_at_LSTM14_weights.15-0.2154.keras', 'PRSA_data_vola14_MLP28_weights.21-0.0013.keras', 'PRSA_data_vola21_MLP14_weights.25-0.0009.keras', 'PRSA_data_vola14_LSTM14_weights.01-0.0027.keras', 
'PRSA_data_at_LSTM21_weights.20-0.0855.keras', 'PRSA_data_vola21_LSTM28_weights.01-0.0039.keras', 'PRSA_data_price_MLP14_weights.02-8059.2510.keras', 'PRSA_data_vola21_MLP7_weights.13-0.0013.keras', 'PRSA_data_vola28_MLP7_weights.04-0.0016.keras', 'PRSA_data_vola21_LSTM21_weights.02-0.0043.keras', 'PRSA_data_price_MLP14_weights.78-683.9230.keras', 'PRSA_data_price_MLP21_weights.19-1002.5925.keras', 'PRSA_data_vola28_MLP21_weights.92-0.0007.keras', 'PRSA_data_price_LSTM28_weights.14-42774.2500.keras', 'PRSA_data_vola21_MLP21_weights.06-0.0010.keras', 'PRSA_data_vola14_LSTM14_weights.03-0.0010.keras', 'PRSA_data_price_LSTM_weights.16-67046.2422.keras', 'PRSA_data_vola7_LSTM28_weights.07-0.0037.keras', 'PRSA_data_vola28_LSTM7_weights.01-0.0017.keras', 'PRSA_data_vola28_LSTM14_weights.02-0.0008.keras', 'PRSA_data_vola7_MLP14_weights.05-0.0022.keras', 'PRSA_data_at_MLP21_weights.03-0.2039.keras', 'PRSA_data_vola14_LSTM28_weights.02-0.0023.keras', 'PRSA_data_at_MLP7_weights.03-13.2212.keras', 'PRSA_data_vola7_LSTM14_weights.04-0.0037.keras', 'PRSA_data_vola14_MLP21_weights.69-0.0010.keras', 'PRSA_data_vola21_LSTM7_weights.18-0.0013.keras', 'PRSA_data_vola7_MLP21_weights.49-0.0025.keras', 'PRSA_data_vola28_MLP28_weights.31-0.0006.keras', 'PRSA_data_price_MLP21_weights.77-841.5739.keras', 'PRSA_data_vola28_MLP14_weights.35-0.0006.keras', 'PRSA_data_vola7_MLP28_weights.26-0.0031.keras', 'PRSA_data_vola7_LSTM14_weights.01-0.0065.keras', 'PRSA_data_price_LSTM14_weights.02-51435.1133.keras', 'PRSA_data_price_LSTM28_weights.20-42714.7695.keras', 'PRSA_data_vola28_LSTM28_weights.16-0.0006.keras', 'PRSA_data_vola14_LSTM28_weights.06-0.0016.keras', 'PRSA_data_vola21_MLP21_weights.13-0.0010.keras', 'PRSA_data_vola28_MLP14_weights.32-0.0006.keras', 'PRSA_data_vola7_MLP28_weights.01-0.0043.keras', 'PRSA_data_vola21_MLP28_weights.08-0.0011.keras', 'PRSA_data_vola21_LSTM28_weights.10-0.0011.keras', 'PRSA_data_vola21_LSTM7_weights.14-0.0012.keras', 
'PRSA_data_vola7_LSTM28_weights.01-0.0049.keras', 'PRSA_data_price_LSTM28_weights.20-42715.6250.keras', 'PRSA_data_vola21_LSTM7_weights.13-0.0012.keras', 'PRSA_data_price_LSTM_weights.05-67155.5078.keras', 'PRSA_data_price_MLP14_weights.14-1073.6493.keras', 'PRSA_data_vola14_LSTM_weights.03-0.0029.keras', 'PRSA_data_vola14_MLP7_weights.07-0.0021.keras', 'PRSA_data_at_MLP28_weights.03-0.0637.keras', 'PRSA_data_vola21_MLP28_weights.03-0.0012.keras', 'PRSA_data_at_MLP28_weights.05-0.1823.keras', 'PRSA_data_at_MLP21_weights.01-0.9762.keras', 'PRSA_data_vola7_LSTM28_weights.15-0.0035.keras', 'PRSA_data_price_LSTM21_weights.08-42992.0703.keras', 'PRSA_data_vola21_MLP21_weights.22-0.0010.keras', 'PRSA_data_at_LSTM28_weights.01-6.0769.keras', 'PRSA_data_vola28_LSTM28_weights.20-0.0006.keras', 'PRSA_data_at_MLP21_weights.01-4.3241.keras', 'PRSA_data_at_LSTM_weights.06-0.8671.keras', 'PRSA_data_price_MLP7_weights.37-1654.4709.keras', 'PRSA_data_at_LSTM_weights.20-0.4626.keras', 'PRSA_data_vola21_MLP28_weights.27-0.0009.keras', 'PRSA_data_at_LSTM_weights.19-0.4729.keras', 'PRSA_data_at_MLP7_weights.24-1.4718.keras', 'PRSA_data_at_MLP7_weights.59-0.4636.keras', 'PRSA_data_vola14_LSTM14_weights.06-0.0005.keras', 'PRSA_data_at_MLP7_weights.14-2.2322.keras', 'PRSA_data_vola7_LSTM14_weights.02-0.0196.keras', 'PRSA_data_at_LSTM14_weights.19-0.3356.keras', 'PRSA_data_price_LSTM28_weights.18-42735.2305.keras', 'PRSA_data_at_LSTM21_weights.04-0.8083.keras', 'PRSA_data_price_LSTM14_weights.12-51335.6523.keras', 'PRSA_data_vola21_LSTM_weights.06-0.0017.keras', 'PRSA_data_price_LSTM28_weights.05-42862.2383.keras', 'PRSA_data_price_MLP7_weights.10-1248.9592.keras', 'PRSA_data_vola21_MLP28_weights.35-0.0009.keras', 'PRSA_data_vola28_LSTM21_weights.03-0.0013.keras', 'PRSA_data_at_LSTM21_weights.03-1.0323.keras', 'PRSA_data_price_MLP14_weights.04-5028.7607.keras', 'PRSA_data_price_LSTM14_weights.08-51374.8945.keras', 'PRSA_data_vola28_MLP14_weights.48-0.0007.keras']
# Generate predictions with the best loaded model, if one is available.
if best7_lstm is not None:
    # Predict on each split and immediately drop the singleton output axis.
    train_preds_price_LSTM7 = np.squeeze(best7_lstm.predict(X_train_price_lstm_7))
    val_preds_price_LSTM7 = np.squeeze(best7_lstm.predict(X_val_price_lstm_7))
    test_preds_price_LSTM7 = np.squeeze(best7_lstm.predict(X_test_price_lstm_7))
    # Report the predictions for each split.
    print("Predicciones de Entrenamiento:", train_preds_price_LSTM7)
    print("Predicciones de validación:", val_preds_price_LSTM7)
    print("Predicciones de prueba:", test_preds_price_LSTM7)
else:
    print("No se puede realizar predicciones porque el mejor modelo no se ha cargado.")
1/156 [..............................] - ETA: 26s
81/156 [==============>...............] - ETA: 0s
156/156 [==============================] - 0s 615us/step
1/1 [==============================] - ETA: 0s
1/1 [==============================] - 0s 6ms/step
1/1 [==============================] - ETA: 0s
1/1 [==============================] - 0s 6ms/step
Predicciones de Entrenamiento: [1.6297626e-01 1.6297626e-01 1.6297626e-01 ... 2.0855881e+02 2.0855881e+02
2.0855881e+02]
Predicciones de validación: [208.5588 208.5588 208.5588 208.5588 208.5588 208.5588 208.5588]
Predicciones de prueba: [208.5588 208.5588 208.5588 208.5588 208.5588 208.5588 208.5588]
# Plot the last 100 training points plus the validation/test series against
# the LSTM predictions for the 7-day horizon.
plot_model(data_train_plot_price7[-100:], data_val_plot_price7, data_test_plot_price7, val_preds_price_LSTM7, test_preds_price_LSTM7, "Predicciones usando Memoria a Corto y Largo Paso (LSTM) para un horizonte de 7 días")
A continuación realizamos un análisis sobre los residuales y rendimiento de nuestro modelo con relación a nuestro conjunto de Entrenamiento (rendimiento) y de Prueba (rendimiento y residuales).
Conjunto de datos de Entrenamiento
A continuación se realiza el análisis de los residuales para el conjunto de entrenamiento.
# Residual diagnostics on the training split: Ljung-Box (autocorrelation) and
# Jarque-Bera (normality) p-values, per the printed test results below.
ljung_box_pval_LSTM_train7, jarque_bera_pval_LSTM_train7 = diagnostic_plots(y_train_price7, train_preds_price_LSTM7)
Ljung-Box LB Statistic: 4952.809205
Ljung-Box p-value: 0.000000
Se rechaza H0: hay autocorrelación en los residuales.
Jarque-Bera p-value: 0.000000
Se rechaza H0: los residuales no siguen una distribución normal.
# Compute the evaluation metrics for the training split and attach the
# residual-diagnostic p-values as extra columns.
metrica_price_LSTM_train = metricas(y_train_price7, train_preds_price_LSTM7)
metrica_price_LSTM_train.index = metrica_price_LSTM_train.index.map({0: 'LSTM Entrenamiento Price τ = 7'})
for _col, _pval in (('Ljung-Box p-value', ljung_box_pval_LSTM_train7),
                    ('Jarque-Bera p-value', jarque_bera_pval_LSTM_train7)):
    metrica_price_LSTM_train[_col] = pd.Series([_pval], index=metrica_price_LSTM_train.index)
metrica_price_LSTM_train
| SSE | MAPE | MAD | MSD | R2 | Ljung-Box p-value | Jarque-Bera p-value | |
|---|---|---|---|---|---|---|---|
| LSTM Entrenamiento Price τ = 7 | 1.7288e+12 | 65.34% | 10422.74 | 3.4777e+08 | -44.87% | 0.0 | 0.0 |
Conjunto de datos de Prueba:
# Residual diagnostics on the test split (Ljung-Box independence and
# Jarque-Bera normality), per the printed conclusions below.
ljung_box_pvalLSTM7, jarque_bera_pvalLSTM7 = evaluate_residuals(data_test_plot_price7, test_preds_price_LSTM7)
No se rechaza H0: los residuales son independientes (no correlacionados).
No se rechaza H0: los residuales siguen una distribución normal.
# Test-split metrics, augmented with the residual-diagnostic p-values.
metrica_LSTM_test = metricas(y_test_price7, test_preds_price_LSTM7)
metrica_LSTM_test.index = metrica_LSTM_test.index.map({0: 'LSTM Prueba Price τ = 7'})
for _col, _pval in (('Ljung-Box p-value', ljung_box_pvalLSTM7),
                    ('Jarque-Bera p-value', jarque_bera_pvalLSTM7)):
    metrica_LSTM_test[_col] = pd.Series([_pval], index=metrica_LSTM_test.index)
metrica_LSTM_test
| SSE | MAPE | MAD | MSD | R2 | Ljung-Box p-value | Jarque-Bera p-value | |
|---|---|---|---|---|---|---|---|
| LSTM Prueba Price τ = 7 | 3.4305e+10 | 99.7% | 69961.76 | 4.9007e+09 | -80571.1% | 0.0894 | 0.7343 |
Curva Runs vs Error/Score :
# Visualize the runs-vs-error/score curve from the stored training history.
plot_best_model_validation_loss(history_price_LSTM7)
Boxplot Errores Conjunto de Entrenamiento, Validación y Prueba :
# Boxplots of the errors on the training, validation and test splits.
errores_plots(y_train_price7, train_preds_price_LSTM7, y_val_price7, val_preds_price_LSTM7, y_test_price7, test_preds_price_LSTM7)
De acuerdo a los resultados obtenidos del gráfico de Run vs Error/Score y al compararlo con el gráfico boxplot de los residuales se observa lo siguiente:
Conjunto de Entrenamiento: La media de los residuales se encuentra considerablemente alejada del error/score correspondiente al val_loss de la época del mejor modelo; además muestra un sesgo considerable a la derecha y una alta variabilidad.
Conjunto de Validación: La media de los residuales se encuentra considerablemente alejada del error/score correspondiente al val_loss de la época del mejor modelo.
Conjunto de Prueba: La media de los residuales se encuentra considerablemente alejada del error/score correspondiente al val_loss de la época del mejor modelo.
Lo anterior es coherente con las métricas obtenidas para el modelo implementado.
Horizonte de 14 días (\(\tau=14\))#
Indexación de parámetros para cambio de dimensión de X de 2D a 3D.
# Reshape the 2D feature matrices into 3D arrays for the LSTM; the printed
# shapes below show the result is (samples, 14, 1) per split.
X_train_price_lstm_14, X_val_price_lstm_14, X_test_price_lstm_14 = change_dimension_lstm(X_train_price14, X_val_price14, X_test_price14)
Shape of 3D arrays X: (4943, 14, 1) (14, 14, 1) (14, 14, 1)
A continuación, se lleva a cabo la búsqueda de los hiperparámetros que optimizan nuestro modelo utilizando Keras y GridSearch.
from sklearn.metrics import make_scorer, mean_absolute_error
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # Silence TensorFlow info and warning logs.
# Define the LSTM network architecture
def create_lstm_model(optimizer, activation):
    """Build and compile the two-layer LSTM regressor for the τ = 14 horizon.

    Args:
        optimizer: Keras optimizer name or instance (e.g. 'SGD') used to compile.
        activation: activation function for the LSTM layers; this is the
            hyperparameter GridSearchCV tunes ('relu', 'tanh', 'sigmoid').

    Returns:
        A compiled ``keras.Model`` mapping (14, 1) input windows to a single
        linear output, trained with MAE loss.
    """
    input_layer_lstm = Input(shape=(14, 1), dtype='float32')
    # BUG FIX: the original ignored `activation`, so the grid search over
    # activation functions was a no-op. Apply it to both LSTM layers.
    lstm_layer1 = LSTM(64, activation=activation, return_sequences=True)(input_layer_lstm)
    # Second LSTM returns only the last timestep's output (return_sequences=False).
    lstm_layer2 = LSTM(32, activation=activation, return_sequences=False)(lstm_layer1)
    dropout_layer_lstm = Dropout(0.2)(lstm_layer2)
    # Linear output head: single-value regression.
    output_layer_lstm = Dense(1, activation='linear')(dropout_layer_lstm)
    ts_model_lstm = Model(inputs=input_layer_lstm, outputs=output_layer_lstm)
    ts_model_lstm.compile(loss='mean_absolute_error', optimizer=optimizer)
    return ts_model_lstm
# Hyperparameter grid (commented lists show the full search space considered).
param_grid = {'activation': ['relu'],  # activation functions to try: ['relu', 'tanh', 'sigmoid']
              'epochs': [20],          # [20, 50, 100, 150]
              'optimizer': ['SGD'],    # ['SGD', 'RMSprop', 'Adam']
              }

# Grid-search configuration.
# BUG FIX: make_scorer defaults to greater_is_better=True, which would make
# GridSearchCV *maximize* MAE and select the worst candidate. MAE is an error
# metric, so it must be minimized (scores are reported negated by sklearn).
scoring = make_scorer(mean_absolute_error, greater_is_better=False)
model = KerasRegressor(build_fn=create_lstm_model, epochs=10, batch_size=16, verbose=0)
grid = GridSearchCV(estimator=model, param_grid=param_grid, cv=KFold(n_splits=5, shuffle=True),
                    scoring=scoring, n_jobs=-1, verbose=2)
grid_result = grid.fit(X_train_price_lstm_14, y_train_price14)

# Grid-search results
print(f"Mejor función de activación: {grid_result.best_params_['activation']}")
print(f"Mejor número de epocas: {grid_result.best_params_['epochs']}")
print(f"Mejor Longitud de paso μ (optimizer): {grid_result.best_params_['optimizer']}")
print(f"Mejor Puntuación: {grid_result.best_score_}")
Fitting 5 folds for each of 1 candidates, totalling 5 fits
[CV] END ....activation=relu, epochs=20, learning_rate=0.001; total time= 5.6s
[CV] END ...activation=relu, epochs=100, learning_rate=0.001; total time= 14.4s
[CV] END ....activation=relu, epochs=20, learning_rate=0.001; total time= 2.7s
[CV] END ...activation=relu, epochs=100, learning_rate=0.001; total time= 13.3s
[CV] END .......activation=sigmoid, epochs=20, optimizer=SGD; total time= 13.6s
[CV] END ..........activation=relu, epochs=20, optimizer=SGD; total time= 21.9s
[CV] END ....activation=relu, epochs=20, learning_rate=0.001; total time= 4.2s
[CV] END ...activation=relu, epochs=100, learning_rate=0.001; total time= 15.0s
[CV] END ....activation=relu, epochs=20, learning_rate=0.001; total time= 3.1s
[CV] END ...activation=relu, epochs=100, learning_rate=0.001; total time= 13.1s
[CV] END .......activation=sigmoid, epochs=20, optimizer=SGD; total time= 13.6s
[CV] END ..........activation=relu, epochs=20, optimizer=SGD; total time= 22.0s
[CV] END ....activation=relu, epochs=50, learning_rate=0.001; total time= 9.0s
[CV] END ...activation=relu, epochs=100, learning_rate=0.001; total time= 11.8s
[CV] END ....activation=relu, epochs=20, learning_rate=0.001; total time= 3.1s
[CV] END ....activation=relu, epochs=20, learning_rate=0.001; total time= 2.8s
[CV] END ....activation=relu, epochs=20, learning_rate=0.001; total time= 4.0s
[CV] END ....activation=relu, epochs=50, learning_rate=0.001; total time= 9.1s
[CV] END ...activation=relu, epochs=100, learning_rate=0.001; total time= 12.9s
[CV] END .......activation=sigmoid, epochs=20, optimizer=SGD; total time= 13.6s
[CV] END ....activation=relu, epochs=50, learning_rate=0.001; total time= 10.9s
[CV] END ....activation=relu, epochs=20, learning_rate=0.001; total time= 5.4s
[CV] END ...activation=relu, epochs=100, learning_rate=0.001; total time= 12.6s
[CV] END ..........activation=relu, epochs=20, optimizer=SGD; total time= 22.0s
[CV] END ....activation=relu, epochs=20, learning_rate=0.001; total time= 4.0s
[CV] END ....activation=relu, epochs=50, learning_rate=0.001; total time= 9.2s
[CV] END ....activation=relu, epochs=20, learning_rate=0.001; total time= 3.1s
[CV] END ....activation=relu, epochs=20, learning_rate=0.001; total time= 2.8s
[CV] END ...activation=relu, epochs=100, learning_rate=0.001; total time= 13.0s
[CV] END ..........activation=relu, epochs=20, optimizer=SGD; total time= 22.2s
[CV] END ....activation=relu, epochs=20, learning_rate=0.001; total time= 4.2s
[CV] END ...activation=relu, epochs=100, learning_rate=0.001; total time= 15.2s
[CV] END ....activation=relu, epochs=20, learning_rate=0.001; total time= 2.8s
[CV] END .......activation=sigmoid, epochs=20, optimizer=SGD; total time= 13.6s
[CV] END ..........activation=relu, epochs=20, optimizer=SGD; total time= 22.2s
[CV] END ....activation=relu, epochs=50, learning_rate=0.001; total time= 8.6s
[CV] END ...activation=relu, epochs=100, learning_rate=0.001; total time= 11.9s
[CV] END ....activation=relu, epochs=20, learning_rate=0.001; total time= 3.1s
[CV] END ....activation=relu, epochs=20, learning_rate=0.001; total time= 2.8s
[CV] END .......activation=sigmoid, epochs=20, optimizer=SGD; total time= 13.2s
---------------------------------------------------------------------------
KeyboardInterrupt Traceback (most recent call last)
Cell In[276], line 29
26 model = KerasRegressor(build_fn=create_lstm_model, epochs=10, batch_size=16, verbose=0)
28 grid = GridSearchCV(estimator=model, param_grid=param_grid,cv=KFold(n_splits=5, shuffle=True), scoring=scoring, n_jobs = -1,verbose =2)
---> 29 grid_result = grid.fit(X_train_price_lstm_14, y_train_price14)
31 # Resultados del Grid Search
32 print(f"Mejor función de activación: {grid_result.best_params_['activation']}")
File /opt/anaconda3/envs/env_st/lib/python3.9/site-packages/sklearn/model_selection/_search.py:910, in BaseSearchCV.fit(self, X, y, groups, **fit_params)
908 refit_start_time = time.time()
909 if y is not None:
--> 910 self.best_estimator_.fit(X, y, **fit_params)
911 else:
912 self.best_estimator_.fit(X, **fit_params)
File /opt/anaconda3/envs/env_st/lib/python3.9/site-packages/keras/wrappers/scikit_learn.py:175, in BaseWrapper.fit(self, x, y, **kwargs)
172 fit_args = copy.deepcopy(self.filter_sk_params(Sequential.fit))
173 fit_args.update(kwargs)
--> 175 history = self.model.fit(x, y, **fit_args)
177 return history
File /opt/anaconda3/envs/env_st/lib/python3.9/site-packages/keras/utils/traceback_utils.py:65, in filter_traceback.<locals>.error_handler(*args, **kwargs)
63 filtered_tb = None
64 try:
---> 65 return fn(*args, **kwargs)
66 except Exception as e:
67 filtered_tb = _process_traceback_frames(e.__traceback__)
File /opt/anaconda3/envs/env_st/lib/python3.9/site-packages/keras/engine/training.py:1685, in Model.fit(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, validation_batch_size, validation_freq, max_queue_size, workers, use_multiprocessing)
1677 with tf.profiler.experimental.Trace(
1678 "train",
1679 epoch_num=epoch,
(...)
1682 _r=1,
1683 ):
1684 callbacks.on_train_batch_begin(step)
-> 1685 tmp_logs = self.train_function(iterator)
1686 if data_handler.should_sync:
1687 context.async_wait()
File /opt/anaconda3/envs/env_st/lib/python3.9/site-packages/tensorflow/python/util/traceback_utils.py:150, in filter_traceback.<locals>.error_handler(*args, **kwargs)
148 filtered_tb = None
149 try:
--> 150 return fn(*args, **kwargs)
151 except Exception as e:
152 filtered_tb = _process_traceback_frames(e.__traceback__)
File /opt/anaconda3/envs/env_st/lib/python3.9/site-packages/tensorflow/python/eager/polymorphic_function/polymorphic_function.py:894, in Function.__call__(self, *args, **kwds)
891 compiler = "xla" if self._jit_compile else "nonXla"
893 with OptionalXlaContext(self._jit_compile):
--> 894 result = self._call(*args, **kwds)
896 new_tracing_count = self.experimental_get_tracing_count()
897 without_tracing = (tracing_count == new_tracing_count)
File /opt/anaconda3/envs/env_st/lib/python3.9/site-packages/tensorflow/python/eager/polymorphic_function/polymorphic_function.py:926, in Function._call(self, *args, **kwds)
923 self._lock.release()
924 # In this case we have created variables on the first call, so we run the
925 # defunned version which is guaranteed to never create variables.
--> 926 return self._no_variable_creation_fn(*args, **kwds) # pylint: disable=not-callable
927 elif self._variable_creation_fn is not None:
928 # Release the lock early so that multiple threads can perform the call
929 # in parallel.
930 self._lock.release()
File /opt/anaconda3/envs/env_st/lib/python3.9/site-packages/tensorflow/python/eager/polymorphic_function/tracing_compiler.py:143, in TracingCompiler.__call__(self, *args, **kwargs)
140 with self._lock:
141 (concrete_function,
142 filtered_flat_args) = self._maybe_define_function(args, kwargs)
--> 143 return concrete_function._call_flat(
144 filtered_flat_args, captured_inputs=concrete_function.captured_inputs)
File /opt/anaconda3/envs/env_st/lib/python3.9/site-packages/tensorflow/python/eager/polymorphic_function/monomorphic_function.py:1757, in ConcreteFunction._call_flat(self, args, captured_inputs, cancellation_manager)
1753 possible_gradient_type = gradients_util.PossibleTapeGradientTypes(args)
1754 if (possible_gradient_type == gradients_util.POSSIBLE_GRADIENT_TYPES_NONE
1755 and executing_eagerly):
1756 # No tape is watching; skip to running the function.
-> 1757 return self._build_call_outputs(self._inference_function.call(
1758 ctx, args, cancellation_manager=cancellation_manager))
1759 forward_backward = self._select_forward_and_backward_functions(
1760 args,
1761 possible_gradient_type,
1762 executing_eagerly)
1763 forward_function, args_with_tangents = forward_backward.forward()
File /opt/anaconda3/envs/env_st/lib/python3.9/site-packages/tensorflow/python/eager/polymorphic_function/monomorphic_function.py:381, in _EagerDefinedFunction.call(self, ctx, args, cancellation_manager)
379 with _InterpolateFunctionError(self):
380 if cancellation_manager is None:
--> 381 outputs = execute.execute(
382 str(self.signature.name),
383 num_outputs=self._num_outputs,
384 inputs=args,
385 attrs=attrs,
386 ctx=ctx)
387 else:
388 outputs = execute.execute_with_cancellation(
389 str(self.signature.name),
390 num_outputs=self._num_outputs,
(...)
393 ctx=ctx,
394 cancellation_manager=cancellation_manager)
File /opt/anaconda3/envs/env_st/lib/python3.9/site-packages/tensorflow/python/eager/execute.py:52, in quick_execute(op_name, num_outputs, inputs, attrs, ctx, name)
50 try:
51 ctx.ensure_initialized()
---> 52 tensors = pywrap_tfe.TFE_Py_Execute(ctx._handle, device_name, op_name,
53 inputs, attrs, num_outputs)
54 except core._NotOkStatusException as e:
55 if name is not None:
KeyboardInterrupt:
El modelo de predicción de series temporales se basa en una red neuronal recurrente que incluye una capa de entrada que se conecta a una capa LSTM. Esta capa LSTM considera 14 pasos temporales, equivalentes al número de datos históricos. La salida de la LSTM se genera únicamente a partir del último paso temporal. Cada uno de los n pasos temporales definidos en el shape (n,) cuenta con 32 neuronas ocultas.
La cantidad de pasos temporales (timesteps) y 1 indica el número de características (features) por cada paso temporal. Se utiliza return_sequences=True cuando es necesario enviar la secuencia completa de salidas a la capa siguiente, que puede ser otra capa recurrente, para este caso otra capa LSTM.
La salida de LSTM pasa a una capa de exclusión que elimina aleatoriamente el 20%, 40%, 60% y 80% de la entrada antes de pasar a la capa de salida, que tiene una única neurona oculta con una función de activación lineal
La indexación de parámetros de la función build_models_lstm se realiza con base en lo indicado en el numeral 3 del parcial práctico.
# Parameter grid for the candidate LSTM models (per item 3 of the practical exam).
input_shape14 = 14  # timesteps per input window (forecast horizon τ = 14)
neurons_list = [10, 100, 1000, 10000]  # candidate hidden-neuron counts
dropout_rates = [0.2, 0.4, 0.6, 0.8]  # candidate dropout rates
# Builds one model per (neurons, dropout) combination, compiled with SGD.
# NOTE(review): every printed summary below shows identical 64/32-unit LSTM
# layers regardless of neurons_list — verify build_models_lstm actually
# applies the requested neuron counts.
models_LSTM14 = build_models_lstm(input_shape14, neurons_list, dropout_rates, 'SGD')
Model: "model_86"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_87 (InputLayer) [(None, 14, 1)] 0
lstm_36 (LSTM) (None, 14, 64) 16896
lstm_37 (LSTM) (None, 32) 12416
dropout_86 (Dropout) (None, 32) 0
dense_290 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10 neuronas y dropout 0.2:
Model: "model_86"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_87 (InputLayer) [(None, 14, 1)] 0
lstm_36 (LSTM) (None, 14, 64) 16896
lstm_37 (LSTM) (None, 32) 12416
dropout_86 (Dropout) (None, 32) 0
dense_290 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_87"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_88 (InputLayer) [(None, 14, 1)] 0
lstm_38 (LSTM) (None, 14, 64) 16896
lstm_39 (LSTM) (None, 32) 12416
dropout_87 (Dropout) (None, 32) 0
dense_291 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10 neuronas y dropout 0.4:
Model: "model_87"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_88 (InputLayer) [(None, 14, 1)] 0
lstm_38 (LSTM) (None, 14, 64) 16896
lstm_39 (LSTM) (None, 32) 12416
dropout_87 (Dropout) (None, 32) 0
dense_291 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_88"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_89 (InputLayer) [(None, 14, 1)] 0
lstm_40 (LSTM) (None, 14, 64) 16896
lstm_41 (LSTM) (None, 32) 12416
dropout_88 (Dropout) (None, 32) 0
dense_292 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10 neuronas y dropout 0.6:
Model: "model_88"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_89 (InputLayer) [(None, 14, 1)] 0
lstm_40 (LSTM) (None, 14, 64) 16896
lstm_41 (LSTM) (None, 32) 12416
dropout_88 (Dropout) (None, 32) 0
dense_292 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_89"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_90 (InputLayer) [(None, 14, 1)] 0
lstm_42 (LSTM) (None, 14, 64) 16896
lstm_43 (LSTM) (None, 32) 12416
dropout_89 (Dropout) (None, 32) 0
dense_293 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10 neuronas y dropout 0.8:
Model: "model_89"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_90 (InputLayer) [(None, 14, 1)] 0
lstm_42 (LSTM) (None, 14, 64) 16896
lstm_43 (LSTM) (None, 32) 12416
dropout_89 (Dropout) (None, 32) 0
dense_293 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_90"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_91 (InputLayer) [(None, 14, 1)] 0
lstm_44 (LSTM) (None, 14, 64) 16896
lstm_45 (LSTM) (None, 32) 12416
dropout_90 (Dropout) (None, 32) 0
dense_294 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 100 neuronas y dropout 0.2:
Model: "model_90"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_91 (InputLayer) [(None, 14, 1)] 0
lstm_44 (LSTM) (None, 14, 64) 16896
lstm_45 (LSTM) (None, 32) 12416
dropout_90 (Dropout) (None, 32) 0
dense_294 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_91"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_92 (InputLayer) [(None, 14, 1)] 0
lstm_46 (LSTM) (None, 14, 64) 16896
lstm_47 (LSTM) (None, 32) 12416
dropout_91 (Dropout) (None, 32) 0
dense_295 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 100 neuronas y dropout 0.4:
Model: "model_91"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_92 (InputLayer) [(None, 14, 1)] 0
lstm_46 (LSTM) (None, 14, 64) 16896
lstm_47 (LSTM) (None, 32) 12416
dropout_91 (Dropout) (None, 32) 0
dense_295 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_92"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_93 (InputLayer) [(None, 14, 1)] 0
lstm_48 (LSTM) (None, 14, 64) 16896
lstm_49 (LSTM) (None, 32) 12416
dropout_92 (Dropout) (None, 32) 0
dense_296 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 100 neuronas y dropout 0.6:
Model: "model_92"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_93 (InputLayer) [(None, 14, 1)] 0
lstm_48 (LSTM) (None, 14, 64) 16896
lstm_49 (LSTM) (None, 32) 12416
dropout_92 (Dropout) (None, 32) 0
dense_296 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_93"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_94 (InputLayer) [(None, 14, 1)] 0
lstm_50 (LSTM) (None, 14, 64) 16896
lstm_51 (LSTM) (None, 32) 12416
dropout_93 (Dropout) (None, 32) 0
dense_297 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 100 neuronas y dropout 0.8:
Model: "model_93"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_94 (InputLayer) [(None, 14, 1)] 0
lstm_50 (LSTM) (None, 14, 64) 16896
lstm_51 (LSTM) (None, 32) 12416
dropout_93 (Dropout) (None, 32) 0
dense_297 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_94"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_95 (InputLayer) [(None, 14, 1)] 0
lstm_52 (LSTM) (None, 14, 64) 16896
lstm_53 (LSTM) (None, 32) 12416
dropout_94 (Dropout) (None, 32) 0
dense_298 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 1000 neuronas y dropout 0.2:
Model: "model_94"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_95 (InputLayer) [(None, 14, 1)] 0
lstm_52 (LSTM) (None, 14, 64) 16896
lstm_53 (LSTM) (None, 32) 12416
dropout_94 (Dropout) (None, 32) 0
dense_298 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_95"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_96 (InputLayer) [(None, 14, 1)] 0
lstm_54 (LSTM) (None, 14, 64) 16896
lstm_55 (LSTM) (None, 32) 12416
dropout_95 (Dropout) (None, 32) 0
dense_299 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 1000 neuronas y dropout 0.4:
Model: "model_95"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_96 (InputLayer) [(None, 14, 1)] 0
lstm_54 (LSTM) (None, 14, 64) 16896
lstm_55 (LSTM) (None, 32) 12416
dropout_95 (Dropout) (None, 32) 0
dense_299 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_96"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_97 (InputLayer) [(None, 14, 1)] 0
lstm_56 (LSTM) (None, 14, 64) 16896
lstm_57 (LSTM) (None, 32) 12416
dropout_96 (Dropout) (None, 32) 0
dense_300 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 1000 neuronas y dropout 0.6:
Model: "model_96"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_97 (InputLayer) [(None, 14, 1)] 0
lstm_56 (LSTM) (None, 14, 64) 16896
lstm_57 (LSTM) (None, 32) 12416
dropout_96 (Dropout) (None, 32) 0
dense_300 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_97"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_98 (InputLayer) [(None, 14, 1)] 0
lstm_58 (LSTM) (None, 14, 64) 16896
lstm_59 (LSTM) (None, 32) 12416
dropout_97 (Dropout) (None, 32) 0
dense_301 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 1000 neuronas y dropout 0.8:
Model: "model_97"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_98 (InputLayer) [(None, 14, 1)] 0
lstm_58 (LSTM) (None, 14, 64) 16896
lstm_59 (LSTM) (None, 32) 12416
dropout_97 (Dropout) (None, 32) 0
dense_301 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_98"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_99 (InputLayer) [(None, 14, 1)] 0
lstm_60 (LSTM) (None, 14, 64) 16896
lstm_61 (LSTM) (None, 32) 12416
dropout_98 (Dropout) (None, 32) 0
dense_302 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10000 neuronas y dropout 0.2:
Model: "model_98"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_99 (InputLayer) [(None, 14, 1)] 0
lstm_60 (LSTM) (None, 14, 64) 16896
lstm_61 (LSTM) (None, 32) 12416
dropout_98 (Dropout) (None, 32) 0
dense_302 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_99"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_100 (InputLayer) [(None, 14, 1)] 0
lstm_62 (LSTM) (None, 14, 64) 16896
lstm_63 (LSTM) (None, 32) 12416
dropout_99 (Dropout) (None, 32) 0
dense_303 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10000 neuronas y dropout 0.4:
Model: "model_99"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_100 (InputLayer) [(None, 14, 1)] 0
lstm_62 (LSTM) (None, 14, 64) 16896
lstm_63 (LSTM) (None, 32) 12416
dropout_99 (Dropout) (None, 32) 0
dense_303 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_100"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_101 (InputLayer) [(None, 14, 1)] 0
lstm_64 (LSTM) (None, 14, 64) 16896
lstm_65 (LSTM) (None, 32) 12416
dropout_100 (Dropout) (None, 32) 0
dense_304 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10000 neuronas y dropout 0.6:
Model: "model_100"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_101 (InputLayer) [(None, 14, 1)] 0
lstm_64 (LSTM) (None, 14, 64) 16896
lstm_65 (LSTM) (None, 32) 12416
dropout_100 (Dropout) (None, 32) 0
dense_304 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_101"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_102 (InputLayer) [(None, 14, 1)] 0
lstm_66 (LSTM) (None, 14, 64) 16896
lstm_67 (LSTM) (None, 32) 12416
dropout_101 (Dropout) (None, 32) 0
dense_305 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10000 neuronas y dropout 0.8:
Model: "model_101"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_102 (InputLayer) [(None, 14, 1)] 0
lstm_66 (LSTM) (None, 14, 64) 16896
lstm_67 (LSTM) (None, 32) 12416
dropout_101 (Dropout) (None, 32) 0
dense_305 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Para entrenar el modelo, se utiliza la función fit() en el objeto del modelo, proporcionándole como argumentos X_train y y_train. El proceso de entrenamiento se lleva a cabo a lo largo de un número específico de épocas. Además, el parámetro batch_size especifica cuántas muestras del conjunto de entrenamiento se usarán en cada iteración de retropropagación (backpropagation).
El conjunto de datos de validación se utiliza para evaluar el rendimiento del modelo al finalizar cada época. Se emplea un objeto ModelCheckpoint que monitorea la función de pérdida en el conjunto de validación y almacena el modelo correspondiente a la época en la que esta función alcanza su valor más bajo.
Se define val_loss como el valor de la función de coste para los datos de validación cruzada y loss como el valor de la función de coste para los datos de entrenamiento. period=1 indica que el modelo se evaluará y potencialmente se guardará después de cada época.
from tensorflow.keras.callbacks import ModelCheckpoint

# Checkpoint path template: Keras fills in the epoch number and validation loss on each save.
save_weights_at = os.path.join(
    'keras_models',
    'PRSA_data_price_LSTM14_weights.{epoch:02d}-{val_loss:.4f}.keras',
)
# Save the full model (not just weights) once per epoch, but only when the
# monitored validation loss improves, so the best checkpoint is kept on disk.
save_best14_lstm = ModelCheckpoint(
    save_weights_at,
    monitor='val_loss',
    verbose=2,
    save_best_only=True,
    save_weights_only=False,
    mode='min',
    save_freq='epoch',
)
A continuación se itera sobre cada uno de los modelos del objeto models_LSTM14.
import os
from joblib import dump, load

history_price_LSTM14 = []
# Train each model in models_LSTM14, caching its training history with joblib
# so re-running the notebook skips already-trained models.
# Fix: the progress messages previously printed the literal text '(unknown)'
# instead of interpolating {filename}; restored the placeholder (the captured
# output below shows the real filenames being printed).
for i, model in enumerate(models_LSTM14):
    filename = f'history_price_LSTM14_model_{i}.joblib'
    if os.path.exists(filename):
        # Cached run: reload the stored history dict instead of retraining.
        model_history = load(filename)
        print(f"El archivo '{filename}' ya existe. Se ha cargado el historial del entrenamiento.")
    else:
        model_history = model.fit(x=X_train_price_lstm_14, y=y_train_price14, batch_size=16, epochs=20,
                                  verbose=2, callbacks=[save_best14_lstm],
                                  validation_data=(X_val_price_lstm_14, y_val_price14),
                                  shuffle=True)
        # Persist only the plain history dict; the History object itself is not
        # reliably picklable across sessions.
        dump(model_history.history, filename)
        print(f"El entrenamiento del modelo {i + 1} se ha completado y el historial ha sido guardado en '{filename}'.")
    # Store the history: loaded entries are already dicts, fresh ones carry a .history attribute.
    history_price_LSTM14.append(model_history if isinstance(model_history, dict) else model_history.history)
El archivo 'history_price_LSTM14_model_0.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_price_LSTM14_model_1.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_price_LSTM14_model_2.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_price_LSTM14_model_3.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_price_LSTM14_model_4.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_price_LSTM14_model_5.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_price_LSTM14_model_6.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_price_LSTM14_model_7.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_price_LSTM14_model_8.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_price_LSTM14_model_9.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_price_LSTM14_model_10.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_price_LSTM14_model_11.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_price_LSTM14_model_12.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_price_LSTM14_model_13.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_price_LSTM14_model_14.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_price_LSTM14_model_15.joblib' ya existe. Se ha cargado el historial del entrenamiento.
En este caso, el parámetro verbose=2 indica que se mostrará una línea por cada época durante el entrenamiento. Los valores posibles son: 0 para no mostrar información, 1 para visualizar una barra de progreso y 2 para ver un registro por época.
Además, las muestras se mezclan de manera aleatoria antes de cada época, gracias a la opción shuffle=True.
Una vez completado el entrenamiento, se realizan predicciones sobre el Precio del Bitcoin utilizando los mejores modelos guardados. Las predicciones generadas, que corresponden al Precio del Bitcoin escalado, son transformadas inversamente para recuperar los valores originales. Asimismo, se evalúa la calidad del ajuste mediante métricas de medición como SSE, MAPE, MAD, MSD y R².
import os
import re
from tensorflow.keras.models import load_model
import numpy as np

# Directory containing the checkpointed Keras models.
model_dir = 'keras_models'
files = os.listdir(model_dir)
# Checkpoint filenames encode epoch and validation loss: <name>.<epoch>-<val_loss>.keras
pattern = r"PRSA_data_price_LSTM14_weights\.(\d+)-([\d\.]+)\.keras"
best_val_loss = float('inf')
best_model_file = None
best14_lstm = None
# Scan every checkpoint and keep the one with the lowest validation loss.
# (The epoch group is matched by the pattern but not needed for selection,
# so the previously-unused `epoch` local was removed.)
for file in files:
    match = re.match(pattern, file)
    if match:
        val_loss = float(match.group(2))  # validation loss parsed from the filename
        if val_loss < best_val_loss:
            best_val_loss = val_loss
            best_model_file = file  # remember the best checkpoint so far
# Load the best checkpoint, if any matching file was found.
if best_model_file:
    best_model_path = os.path.join(model_dir, best_model_file)
    print(f"Cargando el mejor modelo: {best_model_file} con val_loss: {best_val_loss}")
    best14_lstm = load_model(best_model_path)
    if best14_lstm is None:
        print("Error: No se pudo cargar el mejor modelo.")
else:
    print("No se encontraron archivos de modelos que coincidan con el patrón.")
Cargando el mejor modelo: PRSA_data_price_LSTM14_weights.20-51256.7266.keras con val_loss: 51256.7266
A continuación estimamos las predicciones del modelo LSTM para la época en donde la función de pérdida alcanza su valor más bajo.
print("Archivos en el directorio 'keras_models':", files)
Archivos en el directorio 'keras_models': ['PRSA_data_vola14_MLP7_weights.04-0.0018.keras', 'PRSA_data_price_MLP21_weights.01-3579.4128.keras', 'PRSA_data_price_LSTM21_weights.01-43062.3047.keras', 'PRSA_data_price_LSTM28_weights.02-42892.2305.keras', 'PRSA_data_price_LSTM_weights.19-67016.3047.keras', 'PRSA_data_vola14_MLP21_weights.20-0.0011.keras', 'PRSA_data_price_LSTM14_weights.06-51394.8750.keras', 'PRSA_data_vola21_LSTM7_weights.04-0.0012.keras', 'PRSA_data_vola7_MLP28_weights.07-0.0033.keras', 'PRSA_data_price_LSTM21_weights.02-43052.0469.keras', 'PRSA_data_at_MLP7_weights.80-0.0780.keras', 'PRSA_data_vola21_LSTM28_weights.02-0.0022.keras', 'PRSA_data_vola21_LSTM14_weights.04-0.0011.keras', 'PRSA_data_vola7_LSTM21_weights.18-0.0029.keras', 'PRSA_data_vola21_LSTM7_weights.08-0.0013.keras', 'PRSA_data_vola28_MLP14_weights.04-0.0013.keras', 'PRSA_data_price_MLP14_weights.52-626.2623.keras', 'PRSA_data_vola7_MLP7_weights.15-0.0041.keras', 'PRSA_data_price_LSTM14_weights.13-51325.8516.keras', 'PRSA_data_vola14_MLP7_weights.02-0.0023.keras', 'PRSA_data_price_LSTM21_weights.11-42962.6562.keras', 'PRSA_data_vola7_LSTM28_weights.10-0.0037.keras', 'PRSA_data_at_LSTM_weights.04-1.2071.keras', 'PRSA_data_price_MLP7_weights.50-1342.5273.keras', 'PRSA_data_price_MLP14_weights.03-5108.7837.keras', 'PRSA_data_vola28_MLP7_weights.14-0.0012.keras', 'PRSA_data_at_MLP7_weights.11-2.4883.keras', 'PRSA_data_price_LSTM28_weights.08-42832.5430.keras', 'PRSA_data_price_MLP28_weights.01-9242.6553.keras', 'PRSA_data_vola21_LSTM28_weights.04-0.0019.keras', 'PRSA_data_price_MLP21_weights.16-1013.2251.keras', 'PRSA_data_price_MLP28_weights.13-1042.5178.keras', 'PRSA_data_vola28_MLP28_weights.34-0.0006.keras', 'PRSA_data_at_MLP7_weights.01-19.6635.keras', 'PRSA_data_at_MLP7_weights.19-1.8379.keras', 'PRSA_data_at_MLP7_weights.12-2.3991.keras', 'PRSA_data_price_MLP7_weights.15-1346.3275.keras', 'PRSA_data_vola14_MLP28_weights.02-0.0024.keras', 
'PRSA_data_vola14_MLP28_weights.07-0.0017.keras', 'PRSA_data_at_MLP28_weights.05-0.0930.keras', 'PRSA_data_price_LSTM14_weights.20-51256.7266.keras', 'PRSA_data_vola21_LSTM21_weights.05-0.0013.keras', 'PRSA_data_price_MLP14_weights.12-1304.7009.keras', 'PRSA_data_vola7_LSTM14_weights.09-0.0044.keras', 'PRSA_data_vola28_MLP28_weights.50-0.0006.keras', 'PRSA_data_vola7_MLP14_weights.15-0.0020.keras', 'PRSA_data_price_MLP14_weights.08-1386.5365.keras', 'PRSA_data_price_LSTM21_weights.09-42982.2383.keras', 'PRSA_data_price_MLP28_weights.11-7366.8994.keras', 'PRSA_data_vola14_MLP7_weights.18-0.0015.keras', 'PRSA_data_price_MLP7_weights.14-1734.7238.keras', 'PRSA_data_price_LSTM14_weights.15-51306.2070.keras', 'PRSA_data_vola21_MLP28_weights.25-0.0010.keras', 'PRSA_data_at_LSTM14_weights.02-2.2175.keras', 'PRSA_data_vola28_LSTM7_weights.08-0.0014.keras', 'PRSA_data_price_MLP28_weights.10-1107.4047.keras', 'PRSA_data_vola28_MLP7_weights.01-0.0068.keras', 'PRSA_data_vola21_LSTM21_weights.03-0.0015.keras', 'PRSA_data_price_MLP7_weights.01-9253.3047.keras', 'PRSA_data_vola21_MLP14_weights.31-0.0007.keras', 'PRSA_data_price_LSTM28_weights.12-42793.6172.keras', 'PRSA_data_price_LSTM14_weights.09-51365.0000.keras', 'PRSA_data_vola28_MLP28_weights.42-0.0006.keras', 'PRSA_data_price_MLP7_weights.05-4792.5054.keras', 'PRSA_data_vola21_LSTM_weights.01-0.0024.keras', 'PRSA_data_price_LSTM28_weights.17-42745.0352.keras', 'PRSA_data_vola28_MLP28_weights.45-0.0006.keras', 'PRSA_data_vola28_MLP14_weights.08-0.0010.keras', 'PRSA_data_at_LSTM_weights.11-0.6498.keras', 'PRSA_data_vola28_MLP28_weights.39-0.0007.keras', 'PRSA_data_at_MLP21_weights.11-0.0326.keras', 'PRSA_data_price_LSTM_weights.20-67006.4141.keras', 'PRSA_data_vola28_MLP7_weights.06-0.0014.keras', 'PRSA_data_at_LSTM14_weights.06-0.8145.keras', 'PRSA_data_at_MLP28_weights.01-2.1902.keras', 'PRSA_data_price_MLP14_weights.46-814.1992.keras', '.DS_Store', 'PRSA_data_vola28_LSTM28_weights.12-0.0006.keras', 
'PRSA_data_price_LSTM14_weights.18-51276.5625.keras', 'PRSA_data_vola21_LSTM14_weights.01-0.0078.keras', 'PRSA_data_vola14_MLP21_weights.04-0.0015.keras', 'PRSA_data_vola21_LSTM7_weights.02-0.0012.keras', 'PRSA_data_vola21_LSTM14_weights.02-0.0011.keras', 'PRSA_data_at_MLP7_weights.09-2.9793.keras', 'PRSA_data_vola21_MLP14_weights.06-0.0007.keras', 'PRSA_data_vola14_MLP21_weights.78-0.0010.keras', 'PRSA_data_price_MLP14_weights.64-711.9107.keras', 'PRSA_data_vola14_MLP28_weights.31-0.0011.keras', 'PRSA_data_vola14_LSTM_weights.08-0.0020.keras', 'PRSA_data_vola14_LSTM21_weights.02-0.0018.keras', 'PRSA_data_vola14_LSTM28_weights.17-0.0014.keras', 'PRSA_data_vola14_MLP14_weights.29-0.0005.keras', 'PRSA_data_price_LSTM_weights.03-67175.7734.keras', 'PRSA_data_vola14_LSTM_weights.01-0.0068.keras', 'PRSA_data_price_LSTM28_weights.11-42803.3086.keras', 'PRSA_data_price_MLP28_weights.15-7107.3306.keras', 'PRSA_data_vola14_MLP21_weights.49-0.0010.keras', 'PRSA_data_vola28_MLP7_weights.07-0.0010.keras', 'PRSA_data_vola21_LSTM7_weights.02-0.0010.keras', 'PRSA_data_at_LSTM14_weights.15-0.4082.keras', 'PRSA_data_vola28_MLP7_weights.19-0.0011.keras', 'PRSA_data_vola14_MLP14_weights.06-0.0006.keras', 'PRSA_data_vola14_LSTM21_weights.06-0.0012.keras', 'PRSA_data_price_LSTM_weights.17-67036.2891.keras', 'PRSA_data_vola28_MLP7_weights.03-0.0018.keras', 'PRSA_data_vola7_MLP28_weights.02-0.0039.keras', 'PRSA_data_vola7_LSTM28_weights.16-0.0035.keras', 'PRSA_data_vola21_MLP7_weights.20-0.0011.keras', 'PRSA_data_vola21_LSTM7_weights.01-0.0018.keras', 'PRSA_data_vola21_MLP21_weights.33-0.0009.keras', 'PRSA_data_price_MLP21_weights.64-870.7020.keras', 'PRSA_data_vola21_MLP14_weights.22-0.0007.keras', 'PRSA_data_vola7_MLP14_weights.14-0.0020.keras', 'PRSA_data_price_LSTM21_weights.17-42903.9648.keras', 'PRSA_data_vola21_MLP14_weights.01-0.0021.keras', 'PRSA_data_vola21_LSTM7_weights.16-0.0011.keras', 'PRSA_data_at_LSTM14_weights.11-0.6656.keras', 
'PRSA_data_vola28_MLP7_weights.02-0.0019.keras', 'PRSA_data_vola14_MLP28_weights.05-0.0018.keras', 'PRSA_data_vola14_LSTM14_weights.08-0.0005.keras', 'PRSA_data_at_MLP21_weights.02-0.8171.keras', 'PRSA_data_price_MLP28_weights.02-8134.2202.keras', 'PRSA_data_price_LSTM28_weights.06-42852.3320.keras', 'PRSA_data_vola7_MLP14_weights.02-0.0036.keras', 'PRSA_data_vola14_LSTM28_weights.01-0.0026.keras', 'PRSA_data_at_MLP7_weights.04-9.8837.keras', 'PRSA_data_price_MLP28_weights.16-1274.4368.keras', 'PRSA_data_price_LSTM21_weights.07-43002.0078.keras', 'PRSA_data_vola7_MLP28_weights.23-0.0032.keras', 'PRSA_data_price_LSTM_weights.01-67196.3359.keras', 'PRSA_data_at_MLP28_weights.02-1.9156.keras', 'PRSA_data_vola14_MLP21_weights.02-0.0016.keras', 'PRSA_data_vola28_MLP14_weights.14-0.0007.keras', 'PRSA_data_price_LSTM28_weights.07-42842.4375.keras', 'PRSA_data_price_MLP14_weights.37-825.1805.keras', 'PRSA_data_vola7_MLP14_weights.07-0.0021.keras', 'PRSA_data_price_LSTM14_weights.04-51414.8945.keras', 'PRSA_data_price_MLP7_weights.42-1446.1339.keras', 'PRSA_data_price_MLP21_weights.57-932.6497.keras', 'PRSA_data_price_LSTM14_weights.19-51266.6133.keras', 'PRSA_data_vola21_MLP14_weights.34-0.0008.keras', 'PRSA_data_price_MLP14_weights.01-10485.8262.keras', 'PRSA_data_vola21_LSTM7_weights.08-0.0012.keras', 'PRSA_data_at_LSTM_weights.02-2.4966.keras', 'PRSA_data_vola21_LSTM28_weights.15-0.0009.keras', 'PRSA_data_price_LSTM_weights.20-67006.3047.keras', 'PRSA_data_price_LSTM_weights.14-67066.1016.keras', 'PRSA_data_price_LSTM21_weights.19-42884.2422.keras', 'PRSA_data_vola28_LSTM21_weights.18-0.0008.keras', 'PRSA_data_vola21_LSTM7_weights.04-0.0013.keras', 'PRSA_data_price_LSTM28_weights.20-42714.5508.keras', 'PRSA_data_at_LSTM14_weights.13-0.5609.keras', 'PRSA_data_vola28_LSTM28_weights.04-0.0009.keras', 'PRSA_data_vola14_MLP21_weights.01-0.0118.keras', 'PRSA_data_vola7_MLP21_weights.02-0.0032.keras', 'PRSA_data_at_MLP7_weights.74-0.2838.keras', 
'PRSA_data_price_MLP28_weights.17-1217.0461.keras', 'PRSA_data_at_MLP21_weights.16-1.1973.keras', 'PRSA_data_vola7_MLP7_weights.02-0.0036.keras', 'PRSA_data_vola21_LSTM7_weights.02-0.0015.keras', 'PRSA_data_vola14_LSTM14_weights.04-0.0006.keras', 'PRSA_data_vola28_LSTM28_weights.18-0.0006.keras', 'PRSA_data_price_MLP21_weights.74-805.9489.keras', 'PRSA_data_vola21_MLP14_weights.02-0.0011.keras', 'PRSA_data_vola28_MLP21_weights.02-0.0012.keras', 'PRSA_data_vola21_LSTM_weights.11-0.0014.keras', 'PRSA_data_vola7_LSTM14_weights.14-0.0036.keras', 'PRSA_data_at_MLP28_weights.03-1.7712.keras', 'PRSA_data_price_LSTM21_weights.20-42874.3555.keras', 'PRSA_data_vola28_LSTM7_weights.20-0.0010.keras', 'PRSA_data_vola28_MLP21_weights.04-0.0009.keras', 'PRSA_data_at_MLP7_weights.08-3.6716.keras', 'PRSA_data_at_MLP21_weights.04-0.0734.keras', 'PRSA_data_vola21_LSTM28_weights.03-0.0021.keras', 'PRSA_data_vola21_MLP14_weights.35-0.0008.keras', 'PRSA_data_vola28_LSTM7_weights.11-0.0012.keras', 'PRSA_data_price_LSTM28_weights.04-42872.1953.keras', 'PRSA_data_at_LSTM_weights.08-0.4803.keras', 'PRSA_data_vola28_MLP14_weights.05-0.0010.keras', 'PRSA_data_price_LSTM14_weights.14-51316.0625.keras', 'PRSA_data_vola7_MLP14_weights.01-0.0101.keras', 'PRSA_data_vola14_LSTM28_weights.17-0.0015.keras', 'PRSA_data_vola7_LSTM_weights.01-0.0040.keras', 'PRSA_data_vola14_MLP21_weights.09-0.0011.keras', 'PRSA_data_price_LSTM21_weights.05-43021.9570.keras', 'PRSA_data_vola28_MLP28_weights.03-0.0007.keras', 'PRSA_data_vola7_LSTM14_weights.10-0.0042.keras', 'PRSA_data_price_LSTM_weights.07-67135.3359.keras', 'PRSA_data_vola7_MLP14_weights.18-0.0020.keras', 'PRSA_data_price_LSTM21_weights.18-42894.1055.keras', 'PRSA_data_vola7_MLP7_weights.01-0.0042.keras', 'PRSA_data_vola14_LSTM_weights.05-0.0024.keras', 'PRSA_data_vola21_MLP14_weights.27-0.0008.keras', 'PRSA_data_vola28_MLP28_weights.02-0.0018.keras', 'PRSA_data_vola21_LSTM7_weights.03-0.0017.keras', 'PRSA_data_price_LSTM_weights.10-67105.4297.keras', 
'PRSA_data_vola28_MLP7_weights.19-0.0010.keras', 'PRSA_data_vola7_MLP21_weights.03-0.0030.keras', 'PRSA_data_at_MLP7_weights.06-5.3724.keras', 'PRSA_data_at_LSTM_weights.05-1.1226.keras', 'PRSA_data_vola21_LSTM7_weights.01-0.0026.keras', 'PRSA_data_vola21_LSTM28_weights.05-0.0018.keras', 'PRSA_data_vola14_MLP21_weights.05-0.0012.keras', 'PRSA_data_vola21_LSTM28_weights.14-0.0010.keras', 'PRSA_data_vola21_MLP14_weights.08-0.0010.keras', 'PRSA_data_vola21_MLP28_weights.47-0.0008.keras', 'PRSA_data_price_LSTM14_weights.16-51296.3633.keras', 'PRSA_data_at_MLP7_weights.02-16.3421.keras', 'PRSA_data_vola7_LSTM21_weights.15-0.0030.keras', 'PRSA_data_vola21_MLP21_weights.09-0.0009.keras', 'PRSA_data_at_LSTM14_weights.01-6.2168.keras', 'PRSA_data_vola28_LSTM21_weights.01-0.0016.keras', 'PRSA_data_vola21_LSTM21_weights.16-0.0009.keras', 'PRSA_data_vola21_LSTM28_weights.07-0.0016.keras', 'PRSA_data_vola21_MLP28_weights.19-0.0010.keras', 'PRSA_data_vola21_LSTM28_weights.14-0.0009.keras', 'PRSA_data_vola14_MLP7_weights.09-0.0018.keras', 'PRSA_data_vola21_MLP21_weights.02-0.0011.keras', 'PRSA_data_vola28_LSTM14_weights.03-0.0007.keras', 'PRSA_data_vola21_LSTM_weights.09-0.0015.keras', 'PRSA_data_vola21_LSTM7_weights.09-0.0012.keras', 'PRSA_data_vola28_MLP21_weights.65-0.0007.keras', 'PRSA_data_vola21_MLP28_weights.15-0.0011.keras', 'PRSA_data_at_MLP7_weights.42-0.8694.keras', 'PRSA_data_vola21_LSTM28_weights.18-0.0008.keras', 'PRSA_data_vola21_MLP14_weights.04-0.0008.keras', 'PRSA_data_vola7_LSTM14_weights.02-0.0040.keras', 'PRSA_data_vola21_LSTM14_weights.12-0.0007.keras', 'PRSA_data_vola7_LSTM21_weights.15-0.0029.keras', 'PRSA_data_vola21_MLP28_weights.01-0.0026.keras', 'PRSA_data_price_LSTM_weights.13-67075.9453.keras', 'PRSA_data_vola14_MLP7_weights.01-0.0041.keras', 'PRSA_data_price_MLP21_weights.02-1476.1034.keras', 'PRSA_data_vola14_LSTM28_weights.19-0.0014.keras', 'PRSA_data_price_MLP14_weights.96-663.0834.keras', 'PRSA_data_vola7_MLP21_weights.94-0.0024.keras', 
'PRSA_data_price_LSTM21_weights.03-43041.9570.keras', 'PRSA_data_price_MLP7_weights.12-3798.9531.keras', 'PRSA_data_price_LSTM28_weights.10-42813.0352.keras', 'PRSA_data_vola14_MLP28_weights.20-0.0014.keras', 'PRSA_data_vola7_MLP21_weights.04-0.0027.keras', 'PRSA_data_vola7_LSTM28_weights.18-0.0035.keras', 'PRSA_data_at_MLP7_weights.10-2.7632.keras', 'PRSA_data_price_LSTM_weights.06-67145.4141.keras', 'PRSA_data_price_LSTM28_weights.13-42783.9805.keras', 'PRSA_data_vola21_MLP7_weights.13-0.0012.keras', 'PRSA_data_price_LSTM28_weights.01-42902.4570.keras', 'PRSA_data_price_LSTM_weights.20-67005.9609.keras', 'PRSA_data_vola7_LSTM21_weights.04-0.0034.keras', 'PRSA_data_at_MLP14_weights.03-0.0461.keras', 'PRSA_data_vola28_MLP28_weights.01-0.0042.keras', 'PRSA_data_vola7_LSTM14_weights.01-0.0455.keras', 'PRSA_data_price_MLP28_weights.17-1182.9105.keras', 'PRSA_data_vola7_MLP14_weights.05-0.0018.keras', 'PRSA_data_vola28_MLP14_weights.38-0.0006.keras', 'PRSA_data_vola28_MLP7_weights.10-0.0013.keras', 'PRSA_data_vola7_LSTM21_weights.01-0.0038.keras', 'PRSA_data_price_LSTM14_weights.17-51286.4570.keras', 'PRSA_data_price_LSTM28_weights.16-42754.8008.keras', 'PRSA_data_price_LSTM14_weights.11-51345.4023.keras', 'PRSA_data_price_MLP14_weights.22-638.4409.keras', 'PRSA_data_vola7_MLP28_weights.32-0.0029.keras', 'PRSA_data_price_MLP7_weights.48-1648.1864.keras', 'PRSA_data_vola28_LSTM28_weights.10-0.0007.keras', 'PRSA_data_vola21_LSTM_weights.03-0.0021.keras', 'PRSA_data_vola21_LSTM21_weights.13-0.0010.keras', 'PRSA_data_at_MLP7_weights.07-4.2627.keras', 'PRSA_data_vola7_LSTM21_weights.17-0.0029.keras', 'PRSA_data_vola21_LSTM28_weights.08-0.0011.keras', 'PRSA_data_price_MLP21_weights.05-1072.5081.keras', 'PRSA_data_vola7_LSTM21_weights.05-0.0030.keras', 'PRSA_data_price_MLP14_weights.29-856.8320.keras', 'PRSA_data_vola14_LSTM21_weights.11-0.0011.keras', 'PRSA_data_price_LSTM21_weights.10-42972.4219.keras', 'PRSA_data_price_LSTM_weights.02-67185.9453.keras', 
'PRSA_data_vola28_MLP14_weights.21-0.0007.keras', 'PRSA_data_vola21_LSTM28_weights.16-0.0009.keras', 'PRSA_data_vola28_LSTM21_weights.10-0.0009.keras', 'PRSA_data_price_LSTM28_weights.15-42764.5898.keras', 'PRSA_data_at_LSTM14_weights.07-0.7059.keras', 'PRSA_data_vola21_LSTM21_weights.01-0.0048.keras', 'PRSA_data_at_LSTM14_weights.03-1.3422.keras', 'PRSA_data_vola14_LSTM14_weights.02-0.0015.keras', 'PRSA_data_vola14_MLP14_weights.02-0.0007.keras', 'PRSA_data_vola7_LSTM21_weights.02-0.0034.keras', 'PRSA_data_vola7_MLP21_weights.21-0.0025.keras', 'PRSA_data_at_MLP7_weights.18-1.8396.keras', 'PRSA_data_price_LSTM_weights.11-67095.6172.keras', 'PRSA_data_at_LSTM14_weights.12-0.6378.keras', 'PRSA_data_vola21_LSTM21_weights.07-0.0012.keras', 'PRSA_data_vola7_MLP28_weights.45-0.0030.keras', 'PRSA_data_at_MLP7_weights.26-0.8897.keras', 'PRSA_data_vola21_MLP7_weights.01-0.0014.keras', 'PRSA_data_price_MLP14_weights.19-982.5739.keras', 'PRSA_data_vola14_LSTM_weights.16-0.0017.keras', 'PRSA_data_price_MLP21_weights.94-813.8954.keras', 'PRSA_data_vola7_LSTM21_weights.20-0.0029.keras', 'PRSA_data_at_LSTM_weights.03-1.4842.keras', 'PRSA_data_vola21_MLP28_weights.02-0.0015.keras', 'PRSA_data_price_LSTM21_weights.12-42952.9453.keras', 'PRSA_data_price_MLP28_weights.17-1019.2816.keras', 'PRSA_data_vola14_LSTM_weights.13-0.0019.keras', 'PRSA_data_vola7_LSTM_weights.02-0.0038.keras', 'PRSA_data_vola7_LSTM28_weights.02-0.0042.keras', 'PRSA_data_at_LSTM14_weights.13-0.2270.keras', 'PRSA_data_price_LSTM_weights.18-67026.2891.keras', 'PRSA_data_vola28_MLP21_weights.66-0.0007.keras', 'PRSA_data_at_MLP14_weights.42-0.0373.keras', 'PRSA_data_vola7_MLP14_weights.38-0.0020.keras', 'PRSA_data_vola7_LSTM21_weights.11-0.0029.keras', 'PRSA_data_vola21_LSTM21_weights.12-0.0010.keras', 'PRSA_data_at_LSTM_weights.14-0.5158.keras', 'PRSA_data_vola14_LSTM21_weights.01-0.0019.keras', 'PRSA_data_vola28_LSTM21_weights.11-0.0009.keras', 'PRSA_data_vola7_LSTM28_weights.05-0.0038.keras', 
'PRSA_data_vola14_MLP14_weights.10-0.0005.keras', 'PRSA_data_at_MLP21_weights.03-2.1540.keras', 'PRSA_data_at_LSTM_weights.16-0.5377.keras', 'PRSA_data_vola21_LSTM7_weights.13-0.0013.keras', 'PRSA_data_vola21_MLP28_weights.44-0.0008.keras', 'PRSA_data_at_MLP7_weights.16-1.9941.keras', 'PRSA_data_vola28_LSTM28_weights.01-0.0010.keras', 'PRSA_data_vola28_LSTM14_weights.19-0.0006.keras', 'PRSA_data_price_LSTM21_weights.14-42933.4180.keras', 'PRSA_data_vola14_LSTM28_weights.04-0.0019.keras', 'PRSA_data_price_MLP21_weights.82-828.7976.keras', 'PRSA_data_vola28_MLP21_weights.01-0.0030.keras', 'PRSA_data_price_LSTM14_weights.10-51355.2070.keras', 'PRSA_data_vola21_MLP21_weights.18-0.0010.keras', 'PRSA_data_vola28_MLP21_weights.73-0.0007.keras', 'PRSA_data_vola28_MLP7_weights.20-0.0011.keras', 'PRSA_data_price_LSTM21_weights.13-42943.1719.keras', 'PRSA_data_vola28_MLP28_weights.36-0.0007.keras', 'PRSA_data_vola7_MLP14_weights.04-0.0025.keras', 'PRSA_data_at_MLP14_weights.02-0.2715.keras', 'PRSA_data_price_LSTM14_weights.05-51404.8945.keras', 'PRSA_data_price_LSTM28_weights.19-42725.4297.keras', 'PRSA_data_price_MLP28_weights.01-1561.6656.keras', 'PRSA_data_vola21_MLP7_weights.14-0.0011.keras', 'PRSA_data_vola7_MLP7_weights.23-0.0033.keras', 'PRSA_data_vola14_MLP28_weights.01-0.0026.keras', 'PRSA_data_at_MLP14_weights.29-0.0323.keras', 'PRSA_data_vola7_MLP21_weights.01-0.0033.keras', 'PRSA_data_vola21_LSTM_weights.19-0.0014.keras', 'PRSA_data_price_MLP7_weights.03-5953.8076.keras', 'PRSA_data_price_MLP21_weights.42-948.6628.keras', 'PRSA_data_vola21_LSTM14_weights.07-0.0008.keras', 'PRSA_data_vola14_LSTM28_weights.05-0.0018.keras', 'PRSA_data_vola7_LSTM28_weights.13-0.0037.keras', 'PRSA_data_vola21_MLP21_weights.01-0.0016.keras', 'PRSA_data_vola14_LSTM21_weights.08-0.0011.keras', 'PRSA_data_price_LSTM21_weights.06-43011.9570.keras', 'PRSA_data_vola7_MLP14_weights.05-0.0019.keras', 'PRSA_data_vola7_MLP28_weights.16-0.0033.keras', 
'PRSA_data_vola21_LSTM28_weights.16-0.0008.keras', 'PRSA_data_vola28_MLP14_weights.01-0.0031.keras', 'PRSA_data_vola14_MLP28_weights.11-0.0015.keras', 'PRSA_data_price_LSTM14_weights.03-51425.0000.keras', 'PRSA_data_vola7_LSTM28_weights.04-0.0039.keras', 'PRSA_data_vola28_LSTM7_weights.13-0.0011.keras', 'PRSA_data_price_LSTM14_weights.01-51445.4883.keras', 'PRSA_data_vola28_MLP28_weights.22-0.0006.keras', 'PRSA_data_price_LSTM_weights.09-67115.3359.keras', 'PRSA_data_vola28_LSTM7_weights.14-0.0011.keras', 'PRSA_data_vola7_LSTM14_weights.04-0.0050.keras', 'PRSA_data_vola21_MLP28_weights.17-0.0010.keras', 'PRSA_data_vola28_LSTM21_weights.09-0.0010.keras', 'PRSA_data_vola21_MLP14_weights.13-0.0010.keras', 'PRSA_data_vola14_MLP14_weights.01-0.0013.keras', 'PRSA_data_vola21_MLP28_weights.50-0.0009.keras', 'PRSA_data_at_LSTM_weights.19-0.2184.keras', 'PRSA_data_vola14_LSTM_weights.02-0.0030.keras', 'PRSA_data_vola7_MLP28_weights.39-0.0030.keras', 'PRSA_data_price_LSTM21_weights.04-43031.9414.keras', 'PRSA_data_price_LSTM_weights.15-67056.2109.keras', 'PRSA_data_at_MLP7_weights.20-1.6814.keras', 'PRSA_data_vola28_LSTM7_weights.18-0.0010.keras', 'PRSA_data_at_MLP7_weights.95-0.2130.keras', 'PRSA_data_vola14_LSTM28_weights.03-0.0021.keras', 'PRSA_data_at_LSTM_weights.01-7.5653.keras', 'PRSA_data_at_MLP7_weights.05-7.2110.keras', 'PRSA_data_vola21_LSTM28_weights.20-0.0008.keras', 'PRSA_data_at_LSTM_weights.07-0.8569.keras', 'PRSA_data_vola14_LSTM28_weights.12-0.0014.keras', 'PRSA_data_price_LSTM21_weights.16-42913.7969.keras', 'PRSA_data_at_MLP28_weights.04-1.3772.keras', 'PRSA_data_price_LSTM_weights.08-67125.3047.keras', 'PRSA_data_price_LSTM14_weights.07-51384.8633.keras', 'PRSA_data_price_LSTM_weights.04-67165.6016.keras', 'PRSA_data_vola14_LSTM28_weights.15-0.0014.keras', 'PRSA_data_at_MLP14_weights.01-0.4195.keras', 'PRSA_data_price_LSTM28_weights.09-42822.7578.keras', 'PRSA_data_price_LSTM28_weights.03-42882.1953.keras', 'PRSA_data_at_LSTM14_weights.05-0.8408.keras', 
'PRSA_data_vola7_MLP28_weights.08-0.0030.keras', 'PRSA_data_price_MLP14_weights.07-3004.8474.keras', 'PRSA_data_price_LSTM21_weights.15-42923.6367.keras', 'PRSA_data_vola7_MLP21_weights.81-0.0025.keras', 'PRSA_data_price_MLP21_weights.86-819.7106.keras', 'PRSA_data_vola28_MLP7_weights.10-0.0010.keras', 'PRSA_data_vola21_MLP28_weights.05-0.0012.keras', 'PRSA_data_vola21_MLP14_weights.20-0.0007.keras', 'PRSA_data_vola14_MLP28_weights.42-0.0011.keras', 'PRSA_data_price_LSTM_weights.12-67085.7578.keras', 'PRSA_data_vola28_MLP21_weights.13-0.0008.keras', 'PRSA_data_vola14_MLP28_weights.08-0.0016.keras', 'PRSA_data_at_MLP7_weights.86-0.0883.keras', 'PRSA_data_vola28_LSTM14_weights.01-0.0020.keras', 'PRSA_data_vola21_MLP21_weights.71-0.0010.keras', 'PRSA_data_at_MLP28_weights.09-0.1537.keras', 'PRSA_data_vola7_MLP28_weights.48-0.0030.keras', 'PRSA_data_at_MLP28_weights.12-0.0354.keras', 'PRSA_data_at_LSTM14_weights.04-0.9732.keras', 'PRSA_data_at_LSTM14_weights.15-0.2154.keras', 'PRSA_data_vola14_MLP28_weights.21-0.0013.keras', 'PRSA_data_vola21_MLP14_weights.25-0.0009.keras', 'PRSA_data_vola14_LSTM14_weights.01-0.0027.keras', 'PRSA_data_vola21_LSTM28_weights.01-0.0039.keras', 'PRSA_data_price_MLP14_weights.02-8059.2510.keras', 'PRSA_data_vola21_MLP7_weights.13-0.0013.keras', 'PRSA_data_vola28_MLP7_weights.04-0.0016.keras', 'PRSA_data_vola21_LSTM21_weights.02-0.0043.keras', 'PRSA_data_price_MLP14_weights.78-683.9230.keras', 'PRSA_data_price_MLP21_weights.19-1002.5925.keras', 'PRSA_data_vola28_MLP21_weights.92-0.0007.keras', 'PRSA_data_price_LSTM28_weights.14-42774.2500.keras', 'PRSA_data_vola21_MLP21_weights.06-0.0010.keras', 'PRSA_data_vola14_LSTM14_weights.03-0.0010.keras', 'PRSA_data_price_LSTM_weights.16-67046.2422.keras', 'PRSA_data_vola7_LSTM28_weights.07-0.0037.keras', 'PRSA_data_vola28_LSTM7_weights.01-0.0017.keras', 'PRSA_data_vola28_LSTM14_weights.02-0.0008.keras', 'PRSA_data_vola7_MLP14_weights.05-0.0022.keras', 'PRSA_data_at_MLP21_weights.03-0.2039.keras', 
'PRSA_data_vola14_LSTM28_weights.02-0.0023.keras', 'PRSA_data_at_MLP7_weights.03-13.2212.keras', 'PRSA_data_vola7_LSTM14_weights.04-0.0037.keras', 'PRSA_data_vola14_MLP21_weights.69-0.0010.keras', 'PRSA_data_vola21_LSTM7_weights.18-0.0013.keras', 'PRSA_data_vola7_MLP21_weights.49-0.0025.keras', 'PRSA_data_vola28_MLP28_weights.31-0.0006.keras', 'PRSA_data_price_MLP21_weights.77-841.5739.keras', 'PRSA_data_vola28_MLP14_weights.35-0.0006.keras', 'PRSA_data_vola7_MLP28_weights.26-0.0031.keras', 'PRSA_data_vola7_LSTM14_weights.01-0.0065.keras', 'PRSA_data_price_LSTM14_weights.02-51435.1133.keras', 'PRSA_data_price_LSTM28_weights.20-42714.7695.keras', 'PRSA_data_vola28_LSTM28_weights.16-0.0006.keras', 'PRSA_data_vola14_LSTM28_weights.06-0.0016.keras', 'PRSA_data_vola21_MLP21_weights.13-0.0010.keras', 'PRSA_data_vola28_MLP14_weights.32-0.0006.keras', 'PRSA_data_vola7_MLP28_weights.01-0.0043.keras', 'PRSA_data_vola21_MLP28_weights.08-0.0011.keras', 'PRSA_data_vola21_LSTM28_weights.10-0.0011.keras', 'PRSA_data_vola21_LSTM7_weights.14-0.0012.keras', 'PRSA_data_vola7_LSTM28_weights.01-0.0049.keras', 'PRSA_data_price_LSTM28_weights.20-42715.6250.keras', 'PRSA_data_vola21_LSTM7_weights.13-0.0012.keras', 'PRSA_data_price_LSTM_weights.05-67155.5078.keras', 'PRSA_data_price_MLP14_weights.14-1073.6493.keras', 'PRSA_data_vola14_LSTM_weights.03-0.0029.keras', 'PRSA_data_vola14_MLP7_weights.07-0.0021.keras', 'PRSA_data_at_MLP28_weights.03-0.0637.keras', 'PRSA_data_vola21_MLP28_weights.03-0.0012.keras', 'PRSA_data_at_MLP28_weights.05-0.1823.keras', 'PRSA_data_at_MLP21_weights.01-0.9762.keras', 'PRSA_data_vola7_LSTM28_weights.15-0.0035.keras', 'PRSA_data_price_LSTM21_weights.08-42992.0703.keras', 'PRSA_data_vola21_MLP21_weights.22-0.0010.keras', 'PRSA_data_vola28_LSTM28_weights.20-0.0006.keras', 'PRSA_data_at_MLP21_weights.01-4.3241.keras', 'PRSA_data_at_LSTM_weights.06-0.8671.keras', 'PRSA_data_price_MLP7_weights.37-1654.4709.keras', 'PRSA_data_at_LSTM_weights.20-0.4626.keras', 
'PRSA_data_vola21_MLP28_weights.27-0.0009.keras', 'PRSA_data_at_LSTM_weights.19-0.4729.keras', 'PRSA_data_at_MLP7_weights.24-1.4718.keras', 'PRSA_data_at_MLP7_weights.59-0.4636.keras', 'PRSA_data_vola14_LSTM14_weights.06-0.0005.keras', 'PRSA_data_at_MLP7_weights.14-2.2322.keras', 'PRSA_data_vola7_LSTM14_weights.02-0.0196.keras', 'PRSA_data_at_LSTM14_weights.19-0.3356.keras', 'PRSA_data_price_LSTM28_weights.18-42735.2305.keras', 'PRSA_data_price_LSTM14_weights.12-51335.6523.keras', 'PRSA_data_vola21_LSTM_weights.06-0.0017.keras', 'PRSA_data_price_LSTM28_weights.05-42862.2383.keras', 'PRSA_data_price_MLP7_weights.10-1248.9592.keras', 'PRSA_data_vola21_MLP28_weights.35-0.0009.keras', 'PRSA_data_vola28_LSTM21_weights.03-0.0013.keras', 'PRSA_data_price_MLP14_weights.04-5028.7607.keras', 'PRSA_data_price_LSTM14_weights.08-51374.8945.keras', 'PRSA_data_vola28_MLP14_weights.48-0.0007.keras']
# Predictions with the best checkpointed model (guard against a failed load first).
if best14_lstm is None:
    print("No se puede realizar predicciones porque el mejor modelo no se ha cargado.")
else:
    # Predict on each split and drop the trailing singleton dimension in one step.
    train_preds_price_LSTM14 = np.squeeze(best14_lstm.predict(X_train_price_lstm_14))
    val_preds_price_LSTM14 = np.squeeze(best14_lstm.predict(X_val_price_lstm_14))
    test_preds_price_LSTM14 = np.squeeze(best14_lstm.predict(X_test_price_lstm_14))
    # Show the resulting prediction vectors.
    print("Predicciones de Entrenamiento:", train_preds_price_LSTM14)
    print("Predicciones de validación:", val_preds_price_LSTM14)
    print("Predicciones de prueba:", test_preds_price_LSTM14)
155/155 [==============================] - 1s 1ms/step
1/1 [==============================] - 0s 12ms/step
1/1 [==============================] - 0s 10ms/step
Predicciones de Entrenamiento: [-7.8990936e-02 -7.8990936e-02 -7.8990936e-02 ... 2.0750458e+02
2.0750458e+02 2.0750458e+02]
Predicciones de validación: [207.50458 207.50458 207.50458 207.50458 207.50458 207.50458 207.50458
207.50458 207.50458 207.50458 207.50458 207.50458 207.50458 207.50458]
Predicciones de prueba: [207.50458 207.50458 207.50458 207.50458 207.50458 207.50458 207.50458
207.50458 207.50458 207.50458 207.50458 207.50458 207.50458 207.50458]
plot_model(data_train_plot_price14[-100:], data_val_plot_price14, data_test_plot_price14, val_preds_price_LSTM14, test_preds_price_LSTM14, "Predicciones usando Memoria a Corto y Largo Paso (LSTM) para un horizonte de 14 días")
A continuación realizamos un análisis sobre los residuales y el rendimiento de nuestro modelo con relación a los conjuntos de entrenamiento (rendimiento) y de prueba (rendimiento y residuales).
Conjunto de datos de Entrenamiento
A continuación se realiza el análisis de los residuales para el conjunto de entrenamiento.
ljung_box_pval_LSTM_train14, jarque_bera_pval_LSTM_train14 = diagnostic_plots(y_train_price14, train_preds_price_LSTM14)
Ljung-Box LB Statistic: 4930.137123
Ljung-Box p-value: 0.000000
Se rechaza H0: hay autocorrelación en los residuales.
Jarque-Bera p-value: 0.000000
Se rechaza H0: los residuales no siguen una distribución normal.
# Compute the error metrics for the training set, relabel the single row,
# and attach the residual-diagnostic p-values as extra columns.
metrica_price_LSTM_train14 = metricas(y_train_price14, train_preds_price_LSTM14)
metrica_price_LSTM_train14.index = metrica_price_LSTM_train14.index.map({0: 'LSTM Entrenamiento Price τ = 14'})
diagnostics = {
    'Ljung-Box p-value': ljung_box_pval_LSTM_train14,
    'Jarque-Bera p-value': jarque_bera_pval_LSTM_train14,
}
for column_name, p_value in diagnostics.items():
    metrica_price_LSTM_train14[column_name] = pd.Series([p_value], index=metrica_price_LSTM_train14.index)
metrica_price_LSTM_train14
| SSE | MAPE | MAD | MSD | R2 | Ljung-Box p-value | Jarque-Bera p-value | |
|---|---|---|---|---|---|---|---|
| LSTM Entrenamiento Price τ = 14 | 1.6667e+12 | 67.82% | 10252.25 | 3.3718e+08 | -44.73% | 0.0 | 0.0 |
Conjunto de datos de Prueba:
# Residual diagnostics on the test set: returns the Ljung-Box and Jarque-Bera p-values.
ljung_box_pvalLSTM14, jarque_bera_pvalLSTM14 = evaluate_residuals(data_test_plot_price14, test_preds_price_LSTM14)
Se rechaza H0: hay autocorrelación en los residuales.
No se rechaza H0: los residuales siguen una distribución normal.
# Compute the error metrics for the test set and attach the residual-diagnostic
# p-values. Bug fix: the row label previously said "τ = 7" although this
# section evaluates the 14-day horizon.
metrica_LSTM_test14 = metricas(y_test_price14,test_preds_price_LSTM14)
metrica_LSTM_test14.index = metrica_LSTM_test14.index.map({0: 'LSTM Prueba Price τ = 14'})
metrica_LSTM_test14['Ljung-Box p-value'] = pd.Series([ljung_box_pvalLSTM14], index=metrica_LSTM_test14.index)
metrica_LSTM_test14['Jarque-Bera p-value'] = pd.Series([jarque_bera_pvalLSTM14], index=metrica_LSTM_test14.index)
metrica_LSTM_test14
| SSE | MAPE | MAD | MSD | R2 | Ljung-Box p-value | Jarque-Bera p-value | |
|---|---|---|---|---|---|---|---|
| LSTM Prueba Price τ = 14 | 5.6866e+10 | 99.67% | 63593.77 | 4.0619e+09 | -22868.46% | 0.0249 | 0.7014 |
Curva Runs vs Error/Score :
# Plot the per-epoch validation-loss curves for all trained 14-day models.
plot_best_model_validation_loss(history_price_LSTM14)
Boxplot Errores Conjunto de Entrenamiento, Validación y Prueba :
# Boxplots of the prediction errors for the train, validation, and test splits.
errores_plots(y_train_price14, train_preds_price_LSTM14, y_val_price14, val_preds_price_LSTM14, y_test_price14, test_preds_price_LSTM14)
De acuerdo con los resultados obtenidos del gráfico de Run vs Error/Score y al compararlo con el gráfico boxplot de los residuales se observa lo siguiente:
Conjunto de Entrenamiento: La media de los residuales se encuentra alejada considerablemente del error/score correspondiente al val_loss de la época del mejor modelo; además muestra un sesgo considerable a la derecha y una alta variabilidad.
Conjunto de Validación: La media de los residuales se encuentra alejada considerablemente del error/score correspondiente al val_loss de la época del mejor modelo.
Conjunto de Prueba: La media de los residuales se encuentra alejada considerablemente del error/score correspondiente al val_loss de la época del mejor modelo.
Lo anterior es coherente con las métricas obtenidas para el modelo implementado.
Horizonte de 21 días (\(\tau=21\))#
Indexación de parámetros para cambio de dimensión de X de 2D a 3D.
# Reshape the 2D feature matrices into the 3D (samples, timesteps, features) layout required by LSTM layers.
X_train_price_lstm_21, X_val_price_lstm_21, X_test_price_lstm_21 = change_dimension_lstm(X_train_price21, X_val_price21, X_test_price21)
Shape of 3D arrays X: (4915, 21, 1) (21, 21, 1) (21, 21, 1)
A continuación, se lleva a cabo la búsqueda de los hiperparámetros que optimizan nuestro modelo utilizando Keras y GridSearch.
from sklearn.metrics import make_scorer, mean_absolute_error
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # Silence all TensorFlow info and warning logs.
# Define the LSTM network architecture
def create_lstm_model(optimizer, activation):
    """Build and compile the two-layer LSTM regressor used by the grid search.

    Args:
        optimizer: Keras optimizer name or instance used to compile the model.
        activation: activation function applied inside both LSTM layers.
            Bug fix: this argument was previously accepted but ignored, so the
            grid search over activations had no effect. The default LSTM
            activation is 'tanh', so results for the tested 'tanh' config are
            unchanged.

    Returns:
        A compiled tf.keras Model mapping (21, 1) input sequences to a scalar.
    """
    input_layer_lstm = Input(shape=(21, 1), dtype='float32')
    # Thread `activation` through so the GridSearchCV parameter actually matters.
    lstm_layer1 = LSTM(64, activation=activation, return_sequences=True)(input_layer_lstm)
    lstm_layer2 = LSTM(32, activation=activation, return_sequences=False)(lstm_layer1)
    dropout_layer_lstm = Dropout(0.2)(lstm_layer2)
    output_layer_lstm = Dense(1, activation='linear')(dropout_layer_lstm)
    ts_model_lstm = Model(inputs=input_layer_lstm, outputs=output_layer_lstm)
    ts_model_lstm.compile(loss='mean_absolute_error', optimizer=optimizer)
    return ts_model_lstm
# Hyper-parameter grid (reduced search space; full candidates kept in comments).
param_grid = {'activation': ['tanh'], # Activations to try; full space: ['relu', 'tanh', 'sigmoid']
'epochs' : [20], #[20, 50, 100, 150]
'optimizer': ['SGD'] #['SGD', 'RMSprop', 'Adam']
}
# Grid Search configuration.
# Bug fix: make_scorer defaults to greater_is_better=True, which makes
# GridSearchCV *maximize* the MAE; negate it so the lowest error wins.
# (best_score_ is now reported as a negative MAE.)
scoring = make_scorer(mean_absolute_error, greater_is_better=False)
model = KerasRegressor(build_fn=create_lstm_model, epochs=10, batch_size=16, verbose=0)
grid = GridSearchCV(estimator=model, param_grid=param_grid, cv=KFold(n_splits=5, shuffle=True), scoring=scoring, n_jobs=-1, verbose=2)
# Fit over the 3D LSTM-shaped training data; 5-fold CV per candidate.
grid_result = grid.fit(X_train_price_lstm_21, y_train_price21)
# Report the best hyper-parameters and score found by the search.
print(f"Mejor función de activación: {grid_result.best_params_['activation']}")
print(f"Mejor número de epocas: {grid_result.best_params_['epochs']}")
print(f"Mejor Longitud de paso μ (optimizer): {grid_result.best_params_['optimizer']}")
print(f"Mejor Puntuación: {grid_result.best_score_}")
Fitting 5 folds for each of 1 candidates, totalling 5 fits
[CV] END ..........activation=tanh, epochs=20, optimizer=SGD; total time= 37.2s
[CV] END ..........activation=tanh, epochs=20, optimizer=SGD; total time= 37.3s
[CV] END ..........activation=tanh, epochs=20, optimizer=SGD; total time= 37.4s
[CV] END ..........activation=tanh, epochs=20, optimizer=SGD; total time= 37.7s
[CV] END ..........activation=tanh, epochs=20, optimizer=SGD; total time= 38.3s
Mejor función de activación: tanh
Mejor número de epocas: 20
Mejor Longitud de paso μ (optimizer): SGD
Mejor Puntuación: 10191.978701307078
El modelo de predicción de series temporales se basa en una red neuronal recurrente que incluye una capa de entrada que se conecta a una capa LSTM. Esta capa LSTM considera 21 pasos temporales, equivalentes al número de datos históricos. La salida de la LSTM se genera únicamente a partir del último paso temporal. Cada uno de los n pasos temporales definidos en el shape (n,) cuenta con 32 neuronas ocultas.
La cantidad de pasos temporales (timesteps) y 1 indica el número de características (features) por cada paso temporal. Se utiliza return_sequences=True cuando es necesario enviar la secuencia completa de salidas a la capa siguiente, que puede ser otra capa recurrente, para este caso otra capa LSTM.
La salida de LSTM pasa a una capa de exclusión que elimina aleatoriamente el 20%, 40%, 60% y 80% de la entrada antes de pasar a la capa de salida, que tiene una única neurona oculta con una función de activación lineal
La indexación de parámetros de la función build_models_lstm se realiza con base en lo indicado en el numeral 3 del parcial práctico.
# Candidate hyper-parameters for the model builder (numeral 3 of the exam).
input_shape21 = 21
neurons_list = [10, 100, 1000, 10000]
dropout_rates = [0.2, 0.4, 0.6, 0.8]
# One model per (neurons, dropout) combination, trained with SGD.
# NOTE(review): the printed summaries all show the same 29,345-parameter
# 64/32-unit architecture regardless of the neurons value — confirm that
# build_models_lstm actually applies neurons_list.
models_LSTM21 = build_models_lstm(input_shape21, neurons_list, dropout_rates, 'SGD')
Model: "model_103"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_104 (InputLayer) [(None, 21, 1)] 0
lstm_70 (LSTM) (None, 21, 64) 16896
lstm_71 (LSTM) (None, 32) 12416
dropout_103 (Dropout) (None, 32) 0
dense_307 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10 neuronas y dropout 0.2:
Model: "model_103"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_104 (InputLayer) [(None, 21, 1)] 0
lstm_70 (LSTM) (None, 21, 64) 16896
lstm_71 (LSTM) (None, 32) 12416
dropout_103 (Dropout) (None, 32) 0
dense_307 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_104"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_105 (InputLayer) [(None, 21, 1)] 0
lstm_72 (LSTM) (None, 21, 64) 16896
lstm_73 (LSTM) (None, 32) 12416
dropout_104 (Dropout) (None, 32) 0
dense_308 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10 neuronas y dropout 0.4:
Model: "model_104"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_105 (InputLayer) [(None, 21, 1)] 0
lstm_72 (LSTM) (None, 21, 64) 16896
lstm_73 (LSTM) (None, 32) 12416
dropout_104 (Dropout) (None, 32) 0
dense_308 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_105"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_106 (InputLayer) [(None, 21, 1)] 0
lstm_74 (LSTM) (None, 21, 64) 16896
lstm_75 (LSTM) (None, 32) 12416
dropout_105 (Dropout) (None, 32) 0
dense_309 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10 neuronas y dropout 0.6:
Model: "model_105"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_106 (InputLayer) [(None, 21, 1)] 0
lstm_74 (LSTM) (None, 21, 64) 16896
lstm_75 (LSTM) (None, 32) 12416
dropout_105 (Dropout) (None, 32) 0
dense_309 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_106"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_107 (InputLayer) [(None, 21, 1)] 0
lstm_76 (LSTM) (None, 21, 64) 16896
lstm_77 (LSTM) (None, 32) 12416
dropout_106 (Dropout) (None, 32) 0
dense_310 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10 neuronas y dropout 0.8:
Model: "model_106"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_107 (InputLayer) [(None, 21, 1)] 0
lstm_76 (LSTM) (None, 21, 64) 16896
lstm_77 (LSTM) (None, 32) 12416
dropout_106 (Dropout) (None, 32) 0
dense_310 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_107"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_108 (InputLayer) [(None, 21, 1)] 0
lstm_78 (LSTM) (None, 21, 64) 16896
lstm_79 (LSTM) (None, 32) 12416
dropout_107 (Dropout) (None, 32) 0
dense_311 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 100 neuronas y dropout 0.2:
Model: "model_107"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_108 (InputLayer) [(None, 21, 1)] 0
lstm_78 (LSTM) (None, 21, 64) 16896
lstm_79 (LSTM) (None, 32) 12416
dropout_107 (Dropout) (None, 32) 0
dense_311 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_108"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_109 (InputLayer) [(None, 21, 1)] 0
lstm_80 (LSTM) (None, 21, 64) 16896
lstm_81 (LSTM) (None, 32) 12416
dropout_108 (Dropout) (None, 32) 0
dense_312 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 100 neuronas y dropout 0.4:
Model: "model_108"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_109 (InputLayer) [(None, 21, 1)] 0
lstm_80 (LSTM) (None, 21, 64) 16896
lstm_81 (LSTM) (None, 32) 12416
dropout_108 (Dropout) (None, 32) 0
dense_312 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_109"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_110 (InputLayer) [(None, 21, 1)] 0
lstm_82 (LSTM) (None, 21, 64) 16896
lstm_83 (LSTM) (None, 32) 12416
dropout_109 (Dropout) (None, 32) 0
dense_313 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 100 neuronas y dropout 0.6:
Model: "model_109"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_110 (InputLayer) [(None, 21, 1)] 0
lstm_82 (LSTM) (None, 21, 64) 16896
lstm_83 (LSTM) (None, 32) 12416
dropout_109 (Dropout) (None, 32) 0
dense_313 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_110"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_111 (InputLayer) [(None, 21, 1)] 0
lstm_84 (LSTM) (None, 21, 64) 16896
lstm_85 (LSTM) (None, 32) 12416
dropout_110 (Dropout) (None, 32) 0
dense_314 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 100 neuronas y dropout 0.8:
Model: "model_110"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_111 (InputLayer) [(None, 21, 1)] 0
lstm_84 (LSTM) (None, 21, 64) 16896
lstm_85 (LSTM) (None, 32) 12416
dropout_110 (Dropout) (None, 32) 0
dense_314 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_111"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_112 (InputLayer) [(None, 21, 1)] 0
lstm_86 (LSTM) (None, 21, 64) 16896
lstm_87 (LSTM) (None, 32) 12416
dropout_111 (Dropout) (None, 32) 0
dense_315 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 1000 neuronas y dropout 0.2:
Model: "model_111"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_112 (InputLayer) [(None, 21, 1)] 0
lstm_86 (LSTM) (None, 21, 64) 16896
lstm_87 (LSTM) (None, 32) 12416
dropout_111 (Dropout) (None, 32) 0
dense_315 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_112"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_113 (InputLayer) [(None, 21, 1)] 0
lstm_88 (LSTM) (None, 21, 64) 16896
lstm_89 (LSTM) (None, 32) 12416
dropout_112 (Dropout) (None, 32) 0
dense_316 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 1000 neuronas y dropout 0.4:
Model: "model_112"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_113 (InputLayer) [(None, 21, 1)] 0
lstm_88 (LSTM) (None, 21, 64) 16896
lstm_89 (LSTM) (None, 32) 12416
dropout_112 (Dropout) (None, 32) 0
dense_316 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_113"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_114 (InputLayer) [(None, 21, 1)] 0
lstm_90 (LSTM) (None, 21, 64) 16896
lstm_91 (LSTM) (None, 32) 12416
dropout_113 (Dropout) (None, 32) 0
dense_317 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 1000 neuronas y dropout 0.6:
Model: "model_113"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_114 (InputLayer) [(None, 21, 1)] 0
lstm_90 (LSTM) (None, 21, 64) 16896
lstm_91 (LSTM) (None, 32) 12416
dropout_113 (Dropout) (None, 32) 0
dense_317 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_114"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_115 (InputLayer) [(None, 21, 1)] 0
lstm_92 (LSTM) (None, 21, 64) 16896
lstm_93 (LSTM) (None, 32) 12416
dropout_114 (Dropout) (None, 32) 0
dense_318 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 1000 neuronas y dropout 0.8:
Model: "model_114"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_115 (InputLayer) [(None, 21, 1)] 0
lstm_92 (LSTM) (None, 21, 64) 16896
lstm_93 (LSTM) (None, 32) 12416
dropout_114 (Dropout) (None, 32) 0
dense_318 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_115"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_116 (InputLayer) [(None, 21, 1)] 0
lstm_94 (LSTM) (None, 21, 64) 16896
lstm_95 (LSTM) (None, 32) 12416
dropout_115 (Dropout) (None, 32) 0
dense_319 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10000 neuronas y dropout 0.2:
Model: "model_115"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_116 (InputLayer) [(None, 21, 1)] 0
lstm_94 (LSTM) (None, 21, 64) 16896
lstm_95 (LSTM) (None, 32) 12416
dropout_115 (Dropout) (None, 32) 0
dense_319 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_116"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_117 (InputLayer) [(None, 21, 1)] 0
lstm_96 (LSTM) (None, 21, 64) 16896
lstm_97 (LSTM) (None, 32) 12416
dropout_116 (Dropout) (None, 32) 0
dense_320 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10000 neuronas y dropout 0.4:
Model: "model_116"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_117 (InputLayer) [(None, 21, 1)] 0
lstm_96 (LSTM) (None, 21, 64) 16896
lstm_97 (LSTM) (None, 32) 12416
dropout_116 (Dropout) (None, 32) 0
dense_320 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_117"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_118 (InputLayer) [(None, 21, 1)] 0
lstm_98 (LSTM) (None, 21, 64) 16896
lstm_99 (LSTM) (None, 32) 12416
dropout_117 (Dropout) (None, 32) 0
dense_321 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10000 neuronas y dropout 0.6:
Model: "model_117"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_118 (InputLayer) [(None, 21, 1)] 0
lstm_98 (LSTM) (None, 21, 64) 16896
lstm_99 (LSTM) (None, 32) 12416
dropout_117 (Dropout) (None, 32) 0
dense_321 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_118"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_119 (InputLayer) [(None, 21, 1)] 0
lstm_100 (LSTM) (None, 21, 64) 16896
lstm_101 (LSTM) (None, 32) 12416
dropout_118 (Dropout) (None, 32) 0
dense_322 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10000 neuronas y dropout 0.8:
Model: "model_118"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_119 (InputLayer) [(None, 21, 1)] 0
lstm_100 (LSTM) (None, 21, 64) 16896
lstm_101 (LSTM) (None, 32) 12416
dropout_118 (Dropout) (None, 32) 0
dense_322 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Para entrenar el modelo, se utiliza la función fit() en el objeto del modelo, proporcionándole como argumentos X_train y y_train. El proceso de entrenamiento se lleva a cabo a lo largo de un número específico de épocas. Además, el parámetro batch_size especifica cuántas muestras del conjunto de entrenamiento se usarán en cada iteración de retropropagación (backpropagation).
El conjunto de datos de validación se utiliza para evaluar el rendimiento del modelo al finalizar cada época. Se emplea un objeto ModelCheckpoint que monitorea la función de pérdida en el conjunto de validación y almacena el modelo correspondiente a la época en la que esta función alcanza su valor más bajo.
Se define val_loss como el valor de la función de coste para los datos de validación cruzada y loss como el valor de la función de coste para los datos de entrenamiento. period=1 indica que el modelo se evaluará y potencialmente se guardará después de cada época.
from tensorflow.keras.callbacks import ModelCheckpoint
# Checkpoint path encodes the epoch and validation loss; with
# save_best_only=True and mode='min', only models that improve val_loss
# are written, checked at the end of every epoch (save_freq='epoch').
save_weights_at = os.path.join('keras_models', 'PRSA_data_price_LSTM21_weights.{epoch:02d}-{val_loss:.4f}.keras')
save_best21_lstm = ModelCheckpoint(save_weights_at, monitor='val_loss', verbose=2,
save_best_only=True, save_weights_only=False, mode='min', save_freq='epoch');
A continuación se itera sobre cada uno de los modelos del objeto models_LSTM21.
import os
from joblib import dump, load

history_price_LSTM21 = []
# Train each candidate model, caching its training history on disk so a
# re-run of the notebook does not retrain already-fitted models.
for i, model in enumerate(models_LSTM21):
    filename = f'history_price_LSTM21_model_{i}.joblib'
    if os.path.exists(filename):
        # Cached run: load the stored history dict instead of refitting.
        model_history = load(filename)
        # Bug fix: the messages below previously printed a literal
        # "(unknown)" placeholder instead of the actual file name.
        print(f"El archivo '{filename}' ya existe. Se ha cargado el historial del entrenamiento.")
    else:
        model_history = model.fit(x=X_train_price_lstm_21, y=y_train_price21, batch_size=16, epochs=20,
                                  verbose=2, callbacks=[save_best21_lstm], validation_data=(X_val_price_lstm_21, y_val_price21),
                                  shuffle=True)
        dump(model_history.history, filename)
        print(f"El entrenamiento del modelo {i + 1} se ha completado y el historial ha sido guardado en '{filename}'.")
    # Append either the loaded dict or the fresh History object's dict.
    history_price_LSTM21.append(model_history if isinstance(model_history, dict) else model_history.history)
El archivo 'history_price_LSTM21_model_0.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_price_LSTM21_model_1.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_price_LSTM21_model_2.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_price_LSTM21_model_3.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_price_LSTM21_model_4.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_price_LSTM21_model_5.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_price_LSTM21_model_6.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_price_LSTM21_model_7.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_price_LSTM21_model_8.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_price_LSTM21_model_9.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_price_LSTM21_model_10.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_price_LSTM21_model_11.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_price_LSTM21_model_12.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_price_LSTM21_model_13.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_price_LSTM21_model_14.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_price_LSTM21_model_15.joblib' ya existe. Se ha cargado el historial del entrenamiento.
En este caso, el parámetro verbose=2 indica que se mostrará una línea por cada época durante el entrenamiento. Los valores posibles son: 0 para no mostrar información, 1 para visualizar una barra de progreso y 2 para ver un registro por época.
Además, las muestras se mezclan de manera aleatoria antes de cada época, gracias a la opción shuffle=True.
Una vez completado el entrenamiento, se realizan predicciones sobre el Precio del Bitcoin utilizando los mejores modelos guardados. Las predicciones generadas, que corresponden al Precio del Bitcoin escalado, son transformadas inversamente para recuperar los valores originales. Asimismo, se evalúa la calidad del ajuste mediante el uso de métricas de medición como SSE, MAPE, MAD, MSD y R2.
import os
import re
from tensorflow.keras.models import load_model
import numpy as np

# Directory holding the saved checkpoints.
model_dir = 'keras_models'
files = os.listdir(model_dir)
pattern = r"PRSA_data_price_LSTM21_weights\.(\d+)-([\d\.]+)\.keras"

best_val_loss = float('inf')
best_model_file = None
best21_lstm = None

# Scan every file matching the checkpoint naming pattern and keep the one
# with the smallest recorded validation loss (first seen wins on ties).
for candidate in files:
    parsed = re.match(pattern, candidate)
    if parsed is None:
        continue
    candidate_loss = float(parsed.group(2))
    if candidate_loss < best_val_loss:
        best_val_loss = candidate_loss
        best_model_file = candidate

# Load the winning checkpoint, if any matched the pattern.
if best_model_file:
    best_model_path = os.path.join(model_dir, best_model_file)
    print(f"Cargando el mejor modelo: {best_model_file} con val_loss: {best_val_loss}")
    best21_lstm = load_model(best_model_path)
    if best21_lstm is None:
        print("Error: No se pudo cargar el mejor modelo.")
else:
    print("No se encontraron archivos de modelos que coincidan con el patrón.")
Cargando el mejor modelo: PRSA_data_price_LSTM21_weights.20-42874.3555.keras con val_loss: 42874.3555
A continuación estimamos las predicciones del modelo LSTM para la época en donde la función de pérdida alcanza su valor más bajo.
# Show every file present in the checkpoint directory.
print(f"Archivos en el directorio 'keras_models': {files}")
Archivos en el directorio 'keras_models': ['PRSA_data_vola14_MLP7_weights.04-0.0018.keras', 'PRSA_data_price_MLP21_weights.01-3579.4128.keras', 'PRSA_data_price_LSTM21_weights.01-43062.3047.keras', 'PRSA_data_price_LSTM28_weights.02-42892.2305.keras', 'PRSA_data_price_LSTM_weights.19-67016.3047.keras', 'PRSA_data_vola14_MLP21_weights.20-0.0011.keras', 'PRSA_data_price_LSTM14_weights.06-51394.8750.keras', 'PRSA_data_vola21_LSTM7_weights.04-0.0012.keras', 'PRSA_data_vola7_MLP28_weights.07-0.0033.keras', 'PRSA_data_price_LSTM21_weights.02-43052.0469.keras', 'PRSA_data_at_MLP7_weights.80-0.0780.keras', 'PRSA_data_vola21_LSTM28_weights.02-0.0022.keras', 'PRSA_data_vola21_LSTM14_weights.04-0.0011.keras', 'PRSA_data_vola7_LSTM21_weights.18-0.0029.keras', 'PRSA_data_vola21_LSTM7_weights.08-0.0013.keras', 'PRSA_data_vola28_MLP14_weights.04-0.0013.keras', 'PRSA_data_price_MLP14_weights.52-626.2623.keras', 'PRSA_data_vola7_MLP7_weights.15-0.0041.keras', 'PRSA_data_price_LSTM14_weights.13-51325.8516.keras', 'PRSA_data_vola14_MLP7_weights.02-0.0023.keras', 'PRSA_data_price_LSTM21_weights.11-42962.6562.keras', 'PRSA_data_vola7_LSTM28_weights.10-0.0037.keras', 'PRSA_data_at_LSTM_weights.04-1.2071.keras', 'PRSA_data_price_MLP7_weights.50-1342.5273.keras', 'PRSA_data_price_MLP14_weights.03-5108.7837.keras', 'PRSA_data_vola28_MLP7_weights.14-0.0012.keras', 'PRSA_data_at_MLP7_weights.11-2.4883.keras', 'PRSA_data_price_LSTM28_weights.08-42832.5430.keras', 'PRSA_data_price_MLP28_weights.01-9242.6553.keras', 'PRSA_data_vola21_LSTM28_weights.04-0.0019.keras', 'PRSA_data_price_MLP21_weights.16-1013.2251.keras', 'PRSA_data_price_MLP28_weights.13-1042.5178.keras', 'PRSA_data_vola28_MLP28_weights.34-0.0006.keras', 'PRSA_data_at_MLP7_weights.01-19.6635.keras', 'PRSA_data_at_MLP7_weights.19-1.8379.keras', 'PRSA_data_at_MLP7_weights.12-2.3991.keras', 'PRSA_data_price_MLP7_weights.15-1346.3275.keras', 'PRSA_data_vola14_MLP28_weights.02-0.0024.keras', 
'PRSA_data_vola14_MLP28_weights.07-0.0017.keras', 'PRSA_data_at_MLP28_weights.05-0.0930.keras', 'PRSA_data_price_LSTM14_weights.20-51256.7266.keras', 'PRSA_data_vola21_LSTM21_weights.05-0.0013.keras', 'PRSA_data_price_MLP14_weights.12-1304.7009.keras', 'PRSA_data_vola7_LSTM14_weights.09-0.0044.keras', 'PRSA_data_vola28_MLP28_weights.50-0.0006.keras', 'PRSA_data_vola7_MLP14_weights.15-0.0020.keras', 'PRSA_data_price_MLP14_weights.08-1386.5365.keras', 'PRSA_data_price_LSTM21_weights.09-42982.2383.keras', 'PRSA_data_price_MLP28_weights.11-7366.8994.keras', 'PRSA_data_vola14_MLP7_weights.18-0.0015.keras', 'PRSA_data_price_MLP7_weights.14-1734.7238.keras', 'PRSA_data_price_LSTM14_weights.15-51306.2070.keras', 'PRSA_data_vola21_MLP28_weights.25-0.0010.keras', 'PRSA_data_at_LSTM14_weights.02-2.2175.keras', 'PRSA_data_vola28_LSTM7_weights.08-0.0014.keras', 'PRSA_data_price_MLP28_weights.10-1107.4047.keras', 'PRSA_data_vola28_MLP7_weights.01-0.0068.keras', 'PRSA_data_vola21_LSTM21_weights.03-0.0015.keras', 'PRSA_data_price_MLP7_weights.01-9253.3047.keras', 'PRSA_data_vola21_MLP14_weights.31-0.0007.keras', 'PRSA_data_price_LSTM28_weights.12-42793.6172.keras', 'PRSA_data_price_LSTM14_weights.09-51365.0000.keras', 'PRSA_data_vola28_MLP28_weights.42-0.0006.keras', 'PRSA_data_price_MLP7_weights.05-4792.5054.keras', 'PRSA_data_vola21_LSTM_weights.01-0.0024.keras', 'PRSA_data_price_LSTM28_weights.17-42745.0352.keras', 'PRSA_data_vola28_MLP28_weights.45-0.0006.keras', 'PRSA_data_vola28_MLP14_weights.08-0.0010.keras', 'PRSA_data_at_LSTM_weights.11-0.6498.keras', 'PRSA_data_vola28_MLP28_weights.39-0.0007.keras', 'PRSA_data_at_MLP21_weights.11-0.0326.keras', 'PRSA_data_price_LSTM_weights.20-67006.4141.keras', 'PRSA_data_vola28_MLP7_weights.06-0.0014.keras', 'PRSA_data_at_LSTM14_weights.06-0.8145.keras', 'PRSA_data_at_MLP28_weights.01-2.1902.keras', 'PRSA_data_price_MLP14_weights.46-814.1992.keras', '.DS_Store', 'PRSA_data_vola28_LSTM28_weights.12-0.0006.keras', 
'PRSA_data_price_LSTM14_weights.18-51276.5625.keras', 'PRSA_data_vola21_LSTM14_weights.01-0.0078.keras', 'PRSA_data_vola14_MLP21_weights.04-0.0015.keras', 'PRSA_data_vola21_LSTM7_weights.02-0.0012.keras', 'PRSA_data_vola21_LSTM14_weights.02-0.0011.keras', 'PRSA_data_at_MLP7_weights.09-2.9793.keras', 'PRSA_data_vola21_MLP14_weights.06-0.0007.keras', 'PRSA_data_vola14_MLP21_weights.78-0.0010.keras', 'PRSA_data_price_MLP14_weights.64-711.9107.keras', 'PRSA_data_vola14_MLP28_weights.31-0.0011.keras', 'PRSA_data_vola14_LSTM_weights.08-0.0020.keras', 'PRSA_data_vola14_LSTM21_weights.02-0.0018.keras', 'PRSA_data_vola14_LSTM28_weights.17-0.0014.keras', 'PRSA_data_vola14_MLP14_weights.29-0.0005.keras', 'PRSA_data_price_LSTM_weights.03-67175.7734.keras', 'PRSA_data_vola14_LSTM_weights.01-0.0068.keras', 'PRSA_data_price_LSTM28_weights.11-42803.3086.keras', 'PRSA_data_price_MLP28_weights.15-7107.3306.keras', 'PRSA_data_vola14_MLP21_weights.49-0.0010.keras', 'PRSA_data_vola28_MLP7_weights.07-0.0010.keras', 'PRSA_data_vola21_LSTM7_weights.02-0.0010.keras', 'PRSA_data_at_LSTM14_weights.15-0.4082.keras', 'PRSA_data_vola28_MLP7_weights.19-0.0011.keras', 'PRSA_data_vola14_MLP14_weights.06-0.0006.keras', 'PRSA_data_vola14_LSTM21_weights.06-0.0012.keras', 'PRSA_data_price_LSTM_weights.17-67036.2891.keras', 'PRSA_data_vola28_MLP7_weights.03-0.0018.keras', 'PRSA_data_vola7_MLP28_weights.02-0.0039.keras', 'PRSA_data_vola7_LSTM28_weights.16-0.0035.keras', 'PRSA_data_vola21_MLP7_weights.20-0.0011.keras', 'PRSA_data_vola21_LSTM7_weights.01-0.0018.keras', 'PRSA_data_vola21_MLP21_weights.33-0.0009.keras', 'PRSA_data_price_MLP21_weights.64-870.7020.keras', 'PRSA_data_vola21_MLP14_weights.22-0.0007.keras', 'PRSA_data_vola7_MLP14_weights.14-0.0020.keras', 'PRSA_data_price_LSTM21_weights.17-42903.9648.keras', 'PRSA_data_vola21_MLP14_weights.01-0.0021.keras', 'PRSA_data_vola21_LSTM7_weights.16-0.0011.keras', 'PRSA_data_at_LSTM14_weights.11-0.6656.keras', 
'PRSA_data_vola28_MLP7_weights.02-0.0019.keras', 'PRSA_data_vola14_MLP28_weights.05-0.0018.keras', 'PRSA_data_vola14_LSTM14_weights.08-0.0005.keras', 'PRSA_data_at_MLP21_weights.02-0.8171.keras', 'PRSA_data_price_MLP28_weights.02-8134.2202.keras', 'PRSA_data_price_LSTM28_weights.06-42852.3320.keras', 'PRSA_data_vola7_MLP14_weights.02-0.0036.keras', 'PRSA_data_vola14_LSTM28_weights.01-0.0026.keras', 'PRSA_data_at_MLP7_weights.04-9.8837.keras', 'PRSA_data_price_MLP28_weights.16-1274.4368.keras', 'PRSA_data_price_LSTM21_weights.07-43002.0078.keras', 'PRSA_data_vola7_MLP28_weights.23-0.0032.keras', 'PRSA_data_price_LSTM_weights.01-67196.3359.keras', 'PRSA_data_at_MLP28_weights.02-1.9156.keras', 'PRSA_data_vola14_MLP21_weights.02-0.0016.keras', 'PRSA_data_vola28_MLP14_weights.14-0.0007.keras', 'PRSA_data_price_LSTM28_weights.07-42842.4375.keras', 'PRSA_data_price_MLP14_weights.37-825.1805.keras', 'PRSA_data_vola7_MLP14_weights.07-0.0021.keras', 'PRSA_data_price_LSTM14_weights.04-51414.8945.keras', 'PRSA_data_price_MLP7_weights.42-1446.1339.keras', 'PRSA_data_price_MLP21_weights.57-932.6497.keras', 'PRSA_data_price_LSTM14_weights.19-51266.6133.keras', 'PRSA_data_vola21_MLP14_weights.34-0.0008.keras', 'PRSA_data_price_MLP14_weights.01-10485.8262.keras', 'PRSA_data_vola21_LSTM7_weights.08-0.0012.keras', 'PRSA_data_at_LSTM_weights.02-2.4966.keras', 'PRSA_data_vola21_LSTM28_weights.15-0.0009.keras', 'PRSA_data_price_LSTM_weights.20-67006.3047.keras', 'PRSA_data_price_LSTM_weights.14-67066.1016.keras', 'PRSA_data_price_LSTM21_weights.19-42884.2422.keras', 'PRSA_data_vola28_LSTM21_weights.18-0.0008.keras', 'PRSA_data_vola21_LSTM7_weights.04-0.0013.keras', 'PRSA_data_price_LSTM28_weights.20-42714.5508.keras', 'PRSA_data_at_LSTM14_weights.13-0.5609.keras', 'PRSA_data_vola28_LSTM28_weights.04-0.0009.keras', 'PRSA_data_vola14_MLP21_weights.01-0.0118.keras', 'PRSA_data_vola7_MLP21_weights.02-0.0032.keras', 'PRSA_data_at_MLP7_weights.74-0.2838.keras', 
'PRSA_data_price_MLP28_weights.17-1217.0461.keras', 'PRSA_data_at_MLP21_weights.16-1.1973.keras', 'PRSA_data_vola7_MLP7_weights.02-0.0036.keras', 'PRSA_data_vola21_LSTM7_weights.02-0.0015.keras', 'PRSA_data_vola14_LSTM14_weights.04-0.0006.keras', 'PRSA_data_vola28_LSTM28_weights.18-0.0006.keras', 'PRSA_data_price_MLP21_weights.74-805.9489.keras', 'PRSA_data_vola21_MLP14_weights.02-0.0011.keras', 'PRSA_data_vola28_MLP21_weights.02-0.0012.keras', 'PRSA_data_vola21_LSTM_weights.11-0.0014.keras', 'PRSA_data_vola7_LSTM14_weights.14-0.0036.keras', 'PRSA_data_at_MLP28_weights.03-1.7712.keras', 'PRSA_data_price_LSTM21_weights.20-42874.3555.keras', 'PRSA_data_vola28_LSTM7_weights.20-0.0010.keras', 'PRSA_data_vola28_MLP21_weights.04-0.0009.keras', 'PRSA_data_at_MLP7_weights.08-3.6716.keras', 'PRSA_data_at_MLP21_weights.04-0.0734.keras', 'PRSA_data_vola21_LSTM28_weights.03-0.0021.keras', 'PRSA_data_vola21_MLP14_weights.35-0.0008.keras', 'PRSA_data_vola28_LSTM7_weights.11-0.0012.keras', 'PRSA_data_price_LSTM28_weights.04-42872.1953.keras', 'PRSA_data_at_LSTM_weights.08-0.4803.keras', 'PRSA_data_vola28_MLP14_weights.05-0.0010.keras', 'PRSA_data_price_LSTM14_weights.14-51316.0625.keras', 'PRSA_data_vola7_MLP14_weights.01-0.0101.keras', 'PRSA_data_vola14_LSTM28_weights.17-0.0015.keras', 'PRSA_data_vola7_LSTM_weights.01-0.0040.keras', 'PRSA_data_vola14_MLP21_weights.09-0.0011.keras', 'PRSA_data_price_LSTM21_weights.05-43021.9570.keras', 'PRSA_data_vola28_MLP28_weights.03-0.0007.keras', 'PRSA_data_vola7_LSTM14_weights.10-0.0042.keras', 'PRSA_data_price_LSTM_weights.07-67135.3359.keras', 'PRSA_data_vola7_MLP14_weights.18-0.0020.keras', 'PRSA_data_price_LSTM21_weights.18-42894.1055.keras', 'PRSA_data_vola7_MLP7_weights.01-0.0042.keras', 'PRSA_data_vola14_LSTM_weights.05-0.0024.keras', 'PRSA_data_vola21_MLP14_weights.27-0.0008.keras', 'PRSA_data_vola28_MLP28_weights.02-0.0018.keras', 'PRSA_data_vola21_LSTM7_weights.03-0.0017.keras', 'PRSA_data_price_LSTM_weights.10-67105.4297.keras', 
'PRSA_data_vola28_MLP7_weights.19-0.0010.keras', 'PRSA_data_vola7_MLP21_weights.03-0.0030.keras', 'PRSA_data_at_MLP7_weights.06-5.3724.keras', 'PRSA_data_at_LSTM_weights.05-1.1226.keras', 'PRSA_data_vola21_LSTM7_weights.01-0.0026.keras', 'PRSA_data_vola21_LSTM28_weights.05-0.0018.keras', 'PRSA_data_vola14_MLP21_weights.05-0.0012.keras', 'PRSA_data_vola21_LSTM28_weights.14-0.0010.keras', 'PRSA_data_vola21_MLP14_weights.08-0.0010.keras', 'PRSA_data_vola21_MLP28_weights.47-0.0008.keras', 'PRSA_data_price_LSTM14_weights.16-51296.3633.keras', 'PRSA_data_at_MLP7_weights.02-16.3421.keras', 'PRSA_data_vola7_LSTM21_weights.15-0.0030.keras', 'PRSA_data_vola21_MLP21_weights.09-0.0009.keras', 'PRSA_data_at_LSTM14_weights.01-6.2168.keras', 'PRSA_data_vola28_LSTM21_weights.01-0.0016.keras', 'PRSA_data_vola21_LSTM21_weights.16-0.0009.keras', 'PRSA_data_vola21_LSTM28_weights.07-0.0016.keras', 'PRSA_data_vola21_MLP28_weights.19-0.0010.keras', 'PRSA_data_vola21_LSTM28_weights.14-0.0009.keras', 'PRSA_data_vola14_MLP7_weights.09-0.0018.keras', 'PRSA_data_vola21_MLP21_weights.02-0.0011.keras', 'PRSA_data_vola28_LSTM14_weights.03-0.0007.keras', 'PRSA_data_vola21_LSTM_weights.09-0.0015.keras', 'PRSA_data_vola21_LSTM7_weights.09-0.0012.keras', 'PRSA_data_vola28_MLP21_weights.65-0.0007.keras', 'PRSA_data_vola21_MLP28_weights.15-0.0011.keras', 'PRSA_data_at_MLP7_weights.42-0.8694.keras', 'PRSA_data_vola21_LSTM28_weights.18-0.0008.keras', 'PRSA_data_vola21_MLP14_weights.04-0.0008.keras', 'PRSA_data_vola7_LSTM14_weights.02-0.0040.keras', 'PRSA_data_vola21_LSTM14_weights.12-0.0007.keras', 'PRSA_data_vola7_LSTM21_weights.15-0.0029.keras', 'PRSA_data_vola21_MLP28_weights.01-0.0026.keras', 'PRSA_data_price_LSTM_weights.13-67075.9453.keras', 'PRSA_data_vola14_MLP7_weights.01-0.0041.keras', 'PRSA_data_price_MLP21_weights.02-1476.1034.keras', 'PRSA_data_vola14_LSTM28_weights.19-0.0014.keras', 'PRSA_data_price_MLP14_weights.96-663.0834.keras', 'PRSA_data_vola7_MLP21_weights.94-0.0024.keras', 
'PRSA_data_price_LSTM21_weights.03-43041.9570.keras', 'PRSA_data_price_MLP7_weights.12-3798.9531.keras', 'PRSA_data_price_LSTM28_weights.10-42813.0352.keras', 'PRSA_data_vola14_MLP28_weights.20-0.0014.keras', 'PRSA_data_vola7_MLP21_weights.04-0.0027.keras', 'PRSA_data_vola7_LSTM28_weights.18-0.0035.keras', 'PRSA_data_at_MLP7_weights.10-2.7632.keras', 'PRSA_data_price_LSTM_weights.06-67145.4141.keras', 'PRSA_data_price_LSTM28_weights.13-42783.9805.keras', 'PRSA_data_vola21_MLP7_weights.13-0.0012.keras', 'PRSA_data_price_LSTM28_weights.01-42902.4570.keras', 'PRSA_data_price_LSTM_weights.20-67005.9609.keras', 'PRSA_data_vola7_LSTM21_weights.04-0.0034.keras', 'PRSA_data_at_MLP14_weights.03-0.0461.keras', 'PRSA_data_vola28_MLP28_weights.01-0.0042.keras', 'PRSA_data_vola7_LSTM14_weights.01-0.0455.keras', 'PRSA_data_price_MLP28_weights.17-1182.9105.keras', 'PRSA_data_vola7_MLP14_weights.05-0.0018.keras', 'PRSA_data_vola28_MLP14_weights.38-0.0006.keras', 'PRSA_data_vola28_MLP7_weights.10-0.0013.keras', 'PRSA_data_vola7_LSTM21_weights.01-0.0038.keras', 'PRSA_data_price_LSTM14_weights.17-51286.4570.keras', 'PRSA_data_price_LSTM28_weights.16-42754.8008.keras', 'PRSA_data_price_LSTM14_weights.11-51345.4023.keras', 'PRSA_data_price_MLP14_weights.22-638.4409.keras', 'PRSA_data_vola7_MLP28_weights.32-0.0029.keras', 'PRSA_data_price_MLP7_weights.48-1648.1864.keras', 'PRSA_data_vola28_LSTM28_weights.10-0.0007.keras', 'PRSA_data_vola21_LSTM_weights.03-0.0021.keras', 'PRSA_data_vola21_LSTM21_weights.13-0.0010.keras', 'PRSA_data_at_MLP7_weights.07-4.2627.keras', 'PRSA_data_vola7_LSTM21_weights.17-0.0029.keras', 'PRSA_data_vola21_LSTM28_weights.08-0.0011.keras', 'PRSA_data_price_MLP21_weights.05-1072.5081.keras', 'PRSA_data_vola7_LSTM21_weights.05-0.0030.keras', 'PRSA_data_price_MLP14_weights.29-856.8320.keras', 'PRSA_data_vola14_LSTM21_weights.11-0.0011.keras', 'PRSA_data_price_LSTM21_weights.10-42972.4219.keras', 'PRSA_data_price_LSTM_weights.02-67185.9453.keras', 
'PRSA_data_vola28_MLP14_weights.21-0.0007.keras', 'PRSA_data_vola21_LSTM28_weights.16-0.0009.keras', 'PRSA_data_vola28_LSTM21_weights.10-0.0009.keras', 'PRSA_data_price_LSTM28_weights.15-42764.5898.keras', 'PRSA_data_at_LSTM14_weights.07-0.7059.keras', 'PRSA_data_vola21_LSTM21_weights.01-0.0048.keras', 'PRSA_data_at_LSTM14_weights.03-1.3422.keras', 'PRSA_data_vola14_LSTM14_weights.02-0.0015.keras', 'PRSA_data_vola14_MLP14_weights.02-0.0007.keras', 'PRSA_data_vola7_LSTM21_weights.02-0.0034.keras', 'PRSA_data_vola7_MLP21_weights.21-0.0025.keras', 'PRSA_data_at_MLP7_weights.18-1.8396.keras', 'PRSA_data_price_LSTM_weights.11-67095.6172.keras', 'PRSA_data_at_LSTM14_weights.12-0.6378.keras', 'PRSA_data_vola21_LSTM21_weights.07-0.0012.keras', 'PRSA_data_vola7_MLP28_weights.45-0.0030.keras', 'PRSA_data_at_MLP7_weights.26-0.8897.keras', 'PRSA_data_vola21_MLP7_weights.01-0.0014.keras', 'PRSA_data_price_MLP14_weights.19-982.5739.keras', 'PRSA_data_vola14_LSTM_weights.16-0.0017.keras', 'PRSA_data_price_MLP21_weights.94-813.8954.keras', 'PRSA_data_vola7_LSTM21_weights.20-0.0029.keras', 'PRSA_data_at_LSTM_weights.03-1.4842.keras', 'PRSA_data_vola21_MLP28_weights.02-0.0015.keras', 'PRSA_data_price_LSTM21_weights.12-42952.9453.keras', 'PRSA_data_price_MLP28_weights.17-1019.2816.keras', 'PRSA_data_vola14_LSTM_weights.13-0.0019.keras', 'PRSA_data_vola7_LSTM_weights.02-0.0038.keras', 'PRSA_data_vola7_LSTM28_weights.02-0.0042.keras', 'PRSA_data_at_LSTM14_weights.13-0.2270.keras', 'PRSA_data_price_LSTM_weights.18-67026.2891.keras', 'PRSA_data_vola28_MLP21_weights.66-0.0007.keras', 'PRSA_data_at_MLP14_weights.42-0.0373.keras', 'PRSA_data_vola7_MLP14_weights.38-0.0020.keras', 'PRSA_data_vola7_LSTM21_weights.11-0.0029.keras', 'PRSA_data_vola21_LSTM21_weights.12-0.0010.keras', 'PRSA_data_at_LSTM_weights.14-0.5158.keras', 'PRSA_data_vola14_LSTM21_weights.01-0.0019.keras', 'PRSA_data_vola28_LSTM21_weights.11-0.0009.keras', 'PRSA_data_vola7_LSTM28_weights.05-0.0038.keras', 
'PRSA_data_vola14_MLP14_weights.10-0.0005.keras', 'PRSA_data_at_MLP21_weights.03-2.1540.keras', 'PRSA_data_at_LSTM_weights.16-0.5377.keras', 'PRSA_data_vola21_LSTM7_weights.13-0.0013.keras', 'PRSA_data_vola21_MLP28_weights.44-0.0008.keras', 'PRSA_data_at_MLP7_weights.16-1.9941.keras', 'PRSA_data_vola28_LSTM28_weights.01-0.0010.keras', 'PRSA_data_vola28_LSTM14_weights.19-0.0006.keras', 'PRSA_data_price_LSTM21_weights.14-42933.4180.keras', 'PRSA_data_vola14_LSTM28_weights.04-0.0019.keras', 'PRSA_data_price_MLP21_weights.82-828.7976.keras', 'PRSA_data_vola28_MLP21_weights.01-0.0030.keras', 'PRSA_data_price_LSTM14_weights.10-51355.2070.keras', 'PRSA_data_vola21_MLP21_weights.18-0.0010.keras', 'PRSA_data_vola28_MLP21_weights.73-0.0007.keras', 'PRSA_data_vola28_MLP7_weights.20-0.0011.keras', 'PRSA_data_price_LSTM21_weights.13-42943.1719.keras', 'PRSA_data_vola28_MLP28_weights.36-0.0007.keras', 'PRSA_data_vola7_MLP14_weights.04-0.0025.keras', 'PRSA_data_at_MLP14_weights.02-0.2715.keras', 'PRSA_data_price_LSTM14_weights.05-51404.8945.keras', 'PRSA_data_price_LSTM28_weights.19-42725.4297.keras', 'PRSA_data_price_MLP28_weights.01-1561.6656.keras', 'PRSA_data_vola21_MLP7_weights.14-0.0011.keras', 'PRSA_data_vola7_MLP7_weights.23-0.0033.keras', 'PRSA_data_vola14_MLP28_weights.01-0.0026.keras', 'PRSA_data_at_MLP14_weights.29-0.0323.keras', 'PRSA_data_vola7_MLP21_weights.01-0.0033.keras', 'PRSA_data_vola21_LSTM_weights.19-0.0014.keras', 'PRSA_data_price_MLP7_weights.03-5953.8076.keras', 'PRSA_data_price_MLP21_weights.42-948.6628.keras', 'PRSA_data_vola21_LSTM14_weights.07-0.0008.keras', 'PRSA_data_vola14_LSTM28_weights.05-0.0018.keras', 'PRSA_data_vola7_LSTM28_weights.13-0.0037.keras', 'PRSA_data_vola21_MLP21_weights.01-0.0016.keras', 'PRSA_data_vola14_LSTM21_weights.08-0.0011.keras', 'PRSA_data_price_LSTM21_weights.06-43011.9570.keras', 'PRSA_data_vola7_MLP14_weights.05-0.0019.keras', 'PRSA_data_vola7_MLP28_weights.16-0.0033.keras', 
'PRSA_data_vola21_LSTM28_weights.16-0.0008.keras', 'PRSA_data_vola28_MLP14_weights.01-0.0031.keras', 'PRSA_data_vola14_MLP28_weights.11-0.0015.keras', 'PRSA_data_price_LSTM14_weights.03-51425.0000.keras', 'PRSA_data_vola7_LSTM28_weights.04-0.0039.keras', 'PRSA_data_vola28_LSTM7_weights.13-0.0011.keras', 'PRSA_data_price_LSTM14_weights.01-51445.4883.keras', 'PRSA_data_vola28_MLP28_weights.22-0.0006.keras', 'PRSA_data_price_LSTM_weights.09-67115.3359.keras', 'PRSA_data_vola28_LSTM7_weights.14-0.0011.keras', 'PRSA_data_vola7_LSTM14_weights.04-0.0050.keras', 'PRSA_data_vola21_MLP28_weights.17-0.0010.keras', 'PRSA_data_vola28_LSTM21_weights.09-0.0010.keras', 'PRSA_data_vola21_MLP14_weights.13-0.0010.keras', 'PRSA_data_vola14_MLP14_weights.01-0.0013.keras', 'PRSA_data_vola21_MLP28_weights.50-0.0009.keras', 'PRSA_data_at_LSTM_weights.19-0.2184.keras', 'PRSA_data_vola14_LSTM_weights.02-0.0030.keras', 'PRSA_data_vola7_MLP28_weights.39-0.0030.keras', 'PRSA_data_price_LSTM21_weights.04-43031.9414.keras', 'PRSA_data_price_LSTM_weights.15-67056.2109.keras', 'PRSA_data_at_MLP7_weights.20-1.6814.keras', 'PRSA_data_vola28_LSTM7_weights.18-0.0010.keras', 'PRSA_data_at_MLP7_weights.95-0.2130.keras', 'PRSA_data_vola14_LSTM28_weights.03-0.0021.keras', 'PRSA_data_at_LSTM_weights.01-7.5653.keras', 'PRSA_data_at_MLP7_weights.05-7.2110.keras', 'PRSA_data_vola21_LSTM28_weights.20-0.0008.keras', 'PRSA_data_at_LSTM_weights.07-0.8569.keras', 'PRSA_data_vola14_LSTM28_weights.12-0.0014.keras', 'PRSA_data_price_LSTM21_weights.16-42913.7969.keras', 'PRSA_data_at_MLP28_weights.04-1.3772.keras', 'PRSA_data_price_LSTM_weights.08-67125.3047.keras', 'PRSA_data_price_LSTM14_weights.07-51384.8633.keras', 'PRSA_data_price_LSTM_weights.04-67165.6016.keras', 'PRSA_data_vola14_LSTM28_weights.15-0.0014.keras', 'PRSA_data_at_MLP14_weights.01-0.4195.keras', 'PRSA_data_price_LSTM28_weights.09-42822.7578.keras', 'PRSA_data_price_LSTM28_weights.03-42882.1953.keras', 'PRSA_data_at_LSTM14_weights.05-0.8408.keras', 
'PRSA_data_vola7_MLP28_weights.08-0.0030.keras', 'PRSA_data_price_MLP14_weights.07-3004.8474.keras', 'PRSA_data_price_LSTM21_weights.15-42923.6367.keras', 'PRSA_data_vola7_MLP21_weights.81-0.0025.keras', 'PRSA_data_price_MLP21_weights.86-819.7106.keras', 'PRSA_data_vola28_MLP7_weights.10-0.0010.keras', 'PRSA_data_vola21_MLP28_weights.05-0.0012.keras', 'PRSA_data_vola21_MLP14_weights.20-0.0007.keras', 'PRSA_data_vola14_MLP28_weights.42-0.0011.keras', 'PRSA_data_price_LSTM_weights.12-67085.7578.keras', 'PRSA_data_vola28_MLP21_weights.13-0.0008.keras', 'PRSA_data_vola14_MLP28_weights.08-0.0016.keras', 'PRSA_data_at_MLP7_weights.86-0.0883.keras', 'PRSA_data_vola28_LSTM14_weights.01-0.0020.keras', 'PRSA_data_vola21_MLP21_weights.71-0.0010.keras', 'PRSA_data_at_MLP28_weights.09-0.1537.keras', 'PRSA_data_vola7_MLP28_weights.48-0.0030.keras', 'PRSA_data_at_MLP28_weights.12-0.0354.keras', 'PRSA_data_at_LSTM14_weights.04-0.9732.keras', 'PRSA_data_at_LSTM14_weights.15-0.2154.keras', 'PRSA_data_vola14_MLP28_weights.21-0.0013.keras', 'PRSA_data_vola21_MLP14_weights.25-0.0009.keras', 'PRSA_data_vola14_LSTM14_weights.01-0.0027.keras', 'PRSA_data_vola21_LSTM28_weights.01-0.0039.keras', 'PRSA_data_price_MLP14_weights.02-8059.2510.keras', 'PRSA_data_vola21_MLP7_weights.13-0.0013.keras', 'PRSA_data_vola28_MLP7_weights.04-0.0016.keras', 'PRSA_data_vola21_LSTM21_weights.02-0.0043.keras', 'PRSA_data_price_MLP14_weights.78-683.9230.keras', 'PRSA_data_price_MLP21_weights.19-1002.5925.keras', 'PRSA_data_vola28_MLP21_weights.92-0.0007.keras', 'PRSA_data_price_LSTM28_weights.14-42774.2500.keras', 'PRSA_data_vola21_MLP21_weights.06-0.0010.keras', 'PRSA_data_vola14_LSTM14_weights.03-0.0010.keras', 'PRSA_data_price_LSTM_weights.16-67046.2422.keras', 'PRSA_data_vola7_LSTM28_weights.07-0.0037.keras', 'PRSA_data_vola28_LSTM7_weights.01-0.0017.keras', 'PRSA_data_vola28_LSTM14_weights.02-0.0008.keras', 'PRSA_data_vola7_MLP14_weights.05-0.0022.keras', 'PRSA_data_at_MLP21_weights.03-0.2039.keras', 
'PRSA_data_vola14_LSTM28_weights.02-0.0023.keras', 'PRSA_data_at_MLP7_weights.03-13.2212.keras', 'PRSA_data_vola7_LSTM14_weights.04-0.0037.keras', 'PRSA_data_vola14_MLP21_weights.69-0.0010.keras', 'PRSA_data_vola21_LSTM7_weights.18-0.0013.keras', 'PRSA_data_vola7_MLP21_weights.49-0.0025.keras', 'PRSA_data_vola28_MLP28_weights.31-0.0006.keras', 'PRSA_data_price_MLP21_weights.77-841.5739.keras', 'PRSA_data_vola28_MLP14_weights.35-0.0006.keras', 'PRSA_data_vola7_MLP28_weights.26-0.0031.keras', 'PRSA_data_vola7_LSTM14_weights.01-0.0065.keras', 'PRSA_data_price_LSTM14_weights.02-51435.1133.keras', 'PRSA_data_price_LSTM28_weights.20-42714.7695.keras', 'PRSA_data_vola28_LSTM28_weights.16-0.0006.keras', 'PRSA_data_vola14_LSTM28_weights.06-0.0016.keras', 'PRSA_data_vola21_MLP21_weights.13-0.0010.keras', 'PRSA_data_vola28_MLP14_weights.32-0.0006.keras', 'PRSA_data_vola7_MLP28_weights.01-0.0043.keras', 'PRSA_data_vola21_MLP28_weights.08-0.0011.keras', 'PRSA_data_vola21_LSTM28_weights.10-0.0011.keras', 'PRSA_data_vola21_LSTM7_weights.14-0.0012.keras', 'PRSA_data_vola7_LSTM28_weights.01-0.0049.keras', 'PRSA_data_price_LSTM28_weights.20-42715.6250.keras', 'PRSA_data_vola21_LSTM7_weights.13-0.0012.keras', 'PRSA_data_price_LSTM_weights.05-67155.5078.keras', 'PRSA_data_price_MLP14_weights.14-1073.6493.keras', 'PRSA_data_vola14_LSTM_weights.03-0.0029.keras', 'PRSA_data_vola14_MLP7_weights.07-0.0021.keras', 'PRSA_data_at_MLP28_weights.03-0.0637.keras', 'PRSA_data_vola21_MLP28_weights.03-0.0012.keras', 'PRSA_data_at_MLP28_weights.05-0.1823.keras', 'PRSA_data_at_MLP21_weights.01-0.9762.keras', 'PRSA_data_vola7_LSTM28_weights.15-0.0035.keras', 'PRSA_data_price_LSTM21_weights.08-42992.0703.keras', 'PRSA_data_vola21_MLP21_weights.22-0.0010.keras', 'PRSA_data_vola28_LSTM28_weights.20-0.0006.keras', 'PRSA_data_at_MLP21_weights.01-4.3241.keras', 'PRSA_data_at_LSTM_weights.06-0.8671.keras', 'PRSA_data_price_MLP7_weights.37-1654.4709.keras', 'PRSA_data_at_LSTM_weights.20-0.4626.keras', 
'PRSA_data_vola21_MLP28_weights.27-0.0009.keras', 'PRSA_data_at_LSTM_weights.19-0.4729.keras', 'PRSA_data_at_MLP7_weights.24-1.4718.keras', 'PRSA_data_at_MLP7_weights.59-0.4636.keras', 'PRSA_data_vola14_LSTM14_weights.06-0.0005.keras', 'PRSA_data_at_MLP7_weights.14-2.2322.keras', 'PRSA_data_vola7_LSTM14_weights.02-0.0196.keras', 'PRSA_data_at_LSTM14_weights.19-0.3356.keras', 'PRSA_data_price_LSTM28_weights.18-42735.2305.keras', 'PRSA_data_price_LSTM14_weights.12-51335.6523.keras', 'PRSA_data_vola21_LSTM_weights.06-0.0017.keras', 'PRSA_data_price_LSTM28_weights.05-42862.2383.keras', 'PRSA_data_price_MLP7_weights.10-1248.9592.keras', 'PRSA_data_vola21_MLP28_weights.35-0.0009.keras', 'PRSA_data_vola28_LSTM21_weights.03-0.0013.keras', 'PRSA_data_price_MLP14_weights.04-5028.7607.keras', 'PRSA_data_price_LSTM14_weights.08-51374.8945.keras', 'PRSA_data_vola28_MLP14_weights.48-0.0007.keras']
# Predict with the best LSTM checkpoint (21-day horizon) on every split.
if best21_lstm is None:
    print("No se puede realizar predicciones porque el mejor modelo no se ha cargado.")
else:
    # Run inference split by split; np.squeeze drops the singleton axes
    # so each prediction set becomes a 1-D vector.
    train_preds_price_LSTM21 = np.squeeze(best21_lstm.predict(X_train_price_lstm_21))
    val_preds_price_LSTM21 = np.squeeze(best21_lstm.predict(X_val_price_lstm_21))
    test_preds_price_LSTM21 = np.squeeze(best21_lstm.predict(X_test_price_lstm_21))

    # Report the raw predictions for each split.
    print("Predicciones de Entrenamiento:", train_preds_price_LSTM21)
    print("Predicciones de validación:", val_preds_price_LSTM21)
    print("Predicciones de prueba:", test_preds_price_LSTM21)
154/154 [==============================] - 1s 3ms/step
1/1 [==============================] - 0s 11ms/step
1/1 [==============================] - 0s 16ms/step
Predicciones de Entrenamiento: [6.4779758e-02 6.4779758e-02 6.4779758e-02 ... 2.0661783e+02 2.0661783e+02
2.0661783e+02]
Predicciones de validación: [206.61783 206.61783 206.61783 206.61783 206.61783 206.61783 206.61783
206.61783 206.61783 206.61783 206.61783 206.61783 206.61783 206.61783
206.61783 206.61783 206.61783 206.61783 206.61783 206.61783 206.61783]
Predicciones de prueba: [206.61783 206.61783 206.61783 206.61783 206.61783 206.61783 206.61783
206.61783 206.61783 206.61783 206.61783 206.61783 206.61783 206.61783
206.61783 206.61783 206.61783 206.61783 206.61783 206.61783 206.61783]
# Plot the last 100 training points plus the validation/test series against
# the LSTM predictions for the 21-day horizon.
plot_model(data_train_plot_price21[-100:], data_val_plot_price21, data_test_plot_price21, val_preds_price_LSTM21, test_preds_price_LSTM21, "Predicciones usando Memoria a Corto y Largo Paso (LSTM) para un horizonte de 21 días")
A continuación realizamos un análisis sobre los residuales y rendimiento de nuestro modelo con relación a nuestro conjunto de Entrenamiento (rendimiento) y test (rendimiento y residuales).
Conjunto de datos de Entrenamiento
A continuación se realiza el análisis de los residuales para el conjunto de entrenamiento.
# Residual diagnostics on the training split: returns the Ljung-Box
# (autocorrelation) and Jarque-Bera (normality) p-values, printing the test
# verdicts as a side effect.
ljung_box_pval_LSTM_train21, jarque_bera_pval_LSTM_train21 = diagnostic_plots(y_train_price21, train_preds_price_LSTM21)
Ljung-Box LB Statistic: 4903.966909
Ljung-Box p-value: 0.000000
Se rechaza H0: hay autocorrelación en los residuales.
Jarque-Bera p-value: 0.000000
Se rechaza H0: los residuales no siguen una distribución normal.
# Compute fit metrics on the training split, relabel the single row, and
# attach the residual-test p-values as extra columns.
metrica_price_LSTM_train21 = metricas(y_train_price21, train_preds_price_LSTM21)
metrica_price_LSTM_train21.index = metrica_price_LSTM_train21.index.map({0: 'LSTM Entrenamiento Price τ = 21'})
extra_cols = {
    'Ljung-Box p-value': ljung_box_pval_LSTM_train21,
    'Jarque-Bera p-value': jarque_bera_pval_LSTM_train21,
}
for col, pval in extra_cols.items():
    metrica_price_LSTM_train21[col] = pd.Series([pval], index=metrica_price_LSTM_train21.index)
metrica_price_LSTM_train21
| SSE | MAPE | MAD | MSD | R2 | Ljung-Box p-value | Jarque-Bera p-value | |
|---|---|---|---|---|---|---|---|
| LSTM Entrenamiento Price τ = 21 | 1.6280e+12 | 64.44% | 10128.41 | 3.3124e+08 | -44.31% | 0.0 | 0.0 |
Conjunto de datos de Prueba:
# Residual diagnostics on the test split: returns the Ljung-Box
# (autocorrelation) and Jarque-Bera (normality) p-values.
ljung_box_pvalLSTM21, jarque_bera_pvalLSTM21 = evaluate_residuals(data_test_plot_price21, test_preds_price_LSTM21)
Se rechaza H0: hay autocorrelación en los residuales.
No se rechaza H0: los residuales siguen una distribución normal.
# Compute fit metrics on the test split, relabel the single row, and attach
# the residual-test p-values as extra columns.
metrica_LSTM_test21 = metricas(y_test_price21, test_preds_price_LSTM21)
metrica_LSTM_test21.index = metrica_LSTM_test21.index.map({0: 'LSTM Prueba Price τ = 21'})
test_extra_cols = {
    'Ljung-Box p-value': ljung_box_pvalLSTM21,
    'Jarque-Bera p-value': jarque_bera_pvalLSTM21,
}
for col, pval in test_extra_cols.items():
    metrica_LSTM_test21[col] = pd.Series([pval], index=metrica_LSTM_test21.index)
metrica_LSTM_test21
| SSE | MAPE | MAD | MSD | R2 | Ljung-Box p-value | Jarque-Bera p-value | |
|---|---|---|---|---|---|---|---|
| LSTM Prueba Price τ = 21 | 6.2208e+10 | 99.62% | 54232.21 | 2.9623e+09 | -13890.3% | 0.008 | 0.8356 |
Curva Runs vs Error/Score :
# Runs-vs-error/score curve built from the stored training history
# (presumably the per-epoch validation loss — confirm against the helper).
plot_best_model_validation_loss(history_price_LSTM21)
Boxplot Errores Conjunto de Entrenamiento, Validación y Prueba :
# Boxplots of the prediction errors for the train / validation / test splits.
errores_plots(y_train_price21, train_preds_price_LSTM21, y_val_price21, val_preds_price_LSTM21, y_test_price21, test_preds_price_LSTM21)
De acuerdo a los resultados obtenidos del gráfico de Run vs Error/Score y al compararlo con el gráfico boxplot de los residuales se observa lo siguiente:
Conjunto de Entrenamiento: La media de los residuales se encuentra alejada considerablemente del error/score correspondiente al val_loss de la época del mejor modelo; además muestra un sesgo considerable a la derecha y una alta variabilidad.
Conjunto de Validación: La media de los residuales se encuentra alejada considerablemente del error/score correspondiente al val_loss de la época del mejor modelo.
Conjunto de Prueba: La media de los residuales se encuentra alejada considerablemente del error/score correspondiente al val_loss de la época del mejor modelo.
Lo anterior es coherente con las métricas obtenidas para el modelo implementado.
Horizonte de 28 días (\(\tau=28\))#
Indexación de parámetros para cambio de dimensión de X de 2D a 3D.
# Reshape the 2-D feature matrices into the 3-D (samples, timesteps, features)
# layout expected by the LSTM — the printed shapes below show (n, 28, 1).
X_train_price_lstm_28, X_val_price_lstm_28, X_test_price_lstm_28 = change_dimension_lstm(X_train_price28, X_val_price28, X_test_price28)
Shape of 3D arrays X: (4887, 28, 1) (28, 28, 1) (28, 28, 1)
A continuación, se lleva a cabo la búsqueda de los hiperparámetros que optimizan nuestro modelo utilizando Keras y GridSearch.
from sklearn.metrics import make_scorer, mean_absolute_error
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # Esto ignorará todos los logs de info y warnings.
# Define the LSTM network architecture
def create_lstm_model(optimizer, activation):
    """Build and compile a two-layer LSTM regressor for 28-step sequences.

    Parameters:
        optimizer: Keras optimizer name or instance used to compile the model.
        activation: activation function for both LSTM layers. Bug fix: this
            parameter was previously ignored, which made the GridSearch over
            'activation' a no-op. Passing 'tanh' (the only value searched)
            reproduces the previous behavior, since 'tanh' is the LSTM default.

    Returns:
        A compiled Keras Model with MAE loss and a single linear output unit.
    """
    input_layer_lstm = Input(shape=(28, 1), dtype='float32')
    # Forward `activation` so the grid-search parameter actually takes effect.
    lstm_layer1 = LSTM(64, activation=activation, return_sequences=True)(input_layer_lstm)
    # Second LSTM returns only the last timestep's output.
    lstm_layer2 = LSTM(32, activation=activation, return_sequences=False)(lstm_layer1)
    dropout_layer_lstm = Dropout(0.2)(lstm_layer2)
    output_layer_lstm = Dense(1, activation='linear')(dropout_layer_lstm)
    ts_model_lstm = Model(inputs=input_layer_lstm, outputs=output_layer_lstm)
    ts_model_lstm.compile(loss='mean_absolute_error', optimizer=optimizer)
    return ts_model_lstm
# Hyperparameter grid; the commented alternatives document the full search
# space that was narrowed down for runtime reasons.
param_grid = {'activation': ['tanh'],  # activations to try: ['relu', 'tanh', 'sigmoid']
              'epochs': [20],          # [20, 50, 100, 150]
              'optimizer': ['SGD']     # ['SGD', 'RMSprop', 'Adam']
              }
# Grid Search configuration.
# Bug fix: MAE is an error metric (lower is better), so the scorer must be
# built with greater_is_better=False; otherwise GridSearchCV selects the
# candidate with the LARGEST mean absolute error. best_score_ is therefore
# reported as a negated MAE.
scoring = make_scorer(mean_absolute_error, greater_is_better=False)
model = KerasRegressor(build_fn=create_lstm_model, epochs=10, batch_size=16, verbose=0)
# NOTE(review): KFold(shuffle=True) mixes past and future observations; for
# time-series data, TimeSeriesSplit would be the safer choice — confirm.
grid = GridSearchCV(estimator=model, param_grid=param_grid, cv=KFold(n_splits=5, shuffle=True),
                    scoring=scoring, n_jobs=-1, verbose=2)
grid_result = grid.fit(X_train_price_lstm_28, y_train_price28)
# Grid Search results
print(f"Mejor función de activación: {grid_result.best_params_['activation']}")
print(f"Mejor número de epocas: {grid_result.best_params_['epochs']}")
print(f"Mejor Longitud de paso μ (optimizer): {grid_result.best_params_['optimizer']}")
print(f"Mejor Puntuación: {grid_result.best_score_}")
Fitting 5 folds for each of 1 candidates, totalling 5 fits
[CV] END ..........activation=tanh, epochs=20, optimizer=SGD; total time= 44.6s
[CV] END ..........activation=tanh, epochs=20, optimizer=SGD; total time= 44.8s
[CV] END ..........activation=tanh, epochs=20, optimizer=SGD; total time= 44.9s
[CV] END ..........activation=tanh, epochs=20, optimizer=SGD; total time= 45.0s
[CV] END ..........activation=tanh, epochs=20, optimizer=SGD; total time= 45.2s
Mejor función de activación: tanh
Mejor número de epocas: 20
Mejor Longitud de paso μ (optimizer): SGD
Mejor Puntuación: 10024.434961059937
El modelo de predicción de series temporales se basa en una red neuronal recurrente que incluye una capa de entrada que se conecta a una capa LSTM. Esta capa LSTM considera 28 pasos temporales, equivalentes al número de datos históricos. La salida de la LSTM se genera únicamente a partir del último paso temporal. Cada uno de los n pasos temporales definidos en el shape (n,) cuenta con 32 neuronas ocultas.
La cantidad de pasos temporales (timesteps) y 1 indica el número de características (features) por cada paso temporal. Se utiliza return_sequences=True cuando es necesario enviar la secuencia completa de salidas a la capa siguiente, que puede ser otra capa recurrente, para este caso otra capa LSTM.
La salida de LSTM pasa a una capa de exclusión que elimina aleatoriamente el 20%, 40%, 60% y 80% de la entrada antes de pasar a la capa de salida, que tiene una única neurona oculta con una función de activación lineal
La indexación de parámetros de la función build_models_lstm se realiza con base en lo indicado en el numeral 3 del parcial práctico.
# Architecture grid for τ=28: hidden-neuron counts and dropout rates as
# required by numeral 3 of the practical exam; optimizer fixed to SGD.
input_shape28 = 28
neurons_list = [10, 100, 1000, 10000]
dropout_rates = [0.2, 0.4, 0.6, 0.8]
models_LSTM28 = build_models_lstm(input_shape28, neurons_list, dropout_rates, 'SGD')
Model: "model_120"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_121 (InputLayer) [(None, 28, 1)] 0
lstm_104 (LSTM) (None, 28, 64) 16896
lstm_105 (LSTM) (None, 32) 12416
dropout_120 (Dropout) (None, 32) 0
dense_324 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10 neuronas y dropout 0.2:
Model: "model_120"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_121 (InputLayer) [(None, 28, 1)] 0
lstm_104 (LSTM) (None, 28, 64) 16896
lstm_105 (LSTM) (None, 32) 12416
dropout_120 (Dropout) (None, 32) 0
dense_324 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_121"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_122 (InputLayer) [(None, 28, 1)] 0
lstm_106 (LSTM) (None, 28, 64) 16896
lstm_107 (LSTM) (None, 32) 12416
dropout_121 (Dropout) (None, 32) 0
dense_325 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10 neuronas y dropout 0.4:
Model: "model_121"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_122 (InputLayer) [(None, 28, 1)] 0
lstm_106 (LSTM) (None, 28, 64) 16896
lstm_107 (LSTM) (None, 32) 12416
dropout_121 (Dropout) (None, 32) 0
dense_325 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_122"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_123 (InputLayer) [(None, 28, 1)] 0
lstm_108 (LSTM) (None, 28, 64) 16896
lstm_109 (LSTM) (None, 32) 12416
dropout_122 (Dropout) (None, 32) 0
dense_326 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10 neuronas y dropout 0.6:
Model: "model_122"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_123 (InputLayer) [(None, 28, 1)] 0
lstm_108 (LSTM) (None, 28, 64) 16896
lstm_109 (LSTM) (None, 32) 12416
dropout_122 (Dropout) (None, 32) 0
dense_326 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_123"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_124 (InputLayer) [(None, 28, 1)] 0
lstm_110 (LSTM) (None, 28, 64) 16896
lstm_111 (LSTM) (None, 32) 12416
dropout_123 (Dropout) (None, 32) 0
dense_327 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10 neuronas y dropout 0.8:
Model: "model_123"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_124 (InputLayer) [(None, 28, 1)] 0
lstm_110 (LSTM) (None, 28, 64) 16896
lstm_111 (LSTM) (None, 32) 12416
dropout_123 (Dropout) (None, 32) 0
dense_327 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_124"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_125 (InputLayer) [(None, 28, 1)] 0
lstm_112 (LSTM) (None, 28, 64) 16896
lstm_113 (LSTM) (None, 32) 12416
dropout_124 (Dropout) (None, 32) 0
dense_328 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 100 neuronas y dropout 0.2:
Model: "model_124"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_125 (InputLayer) [(None, 28, 1)] 0
lstm_112 (LSTM) (None, 28, 64) 16896
lstm_113 (LSTM) (None, 32) 12416
dropout_124 (Dropout) (None, 32) 0
dense_328 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_125"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_126 (InputLayer) [(None, 28, 1)] 0
lstm_114 (LSTM) (None, 28, 64) 16896
lstm_115 (LSTM) (None, 32) 12416
dropout_125 (Dropout) (None, 32) 0
dense_329 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 100 neuronas y dropout 0.4:
Model: "model_125"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_126 (InputLayer) [(None, 28, 1)] 0
lstm_114 (LSTM) (None, 28, 64) 16896
lstm_115 (LSTM) (None, 32) 12416
dropout_125 (Dropout) (None, 32) 0
dense_329 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_126"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_127 (InputLayer) [(None, 28, 1)] 0
lstm_116 (LSTM) (None, 28, 64) 16896
lstm_117 (LSTM) (None, 32) 12416
dropout_126 (Dropout) (None, 32) 0
dense_330 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 100 neuronas y dropout 0.6:
Model: "model_126"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_127 (InputLayer) [(None, 28, 1)] 0
lstm_116 (LSTM) (None, 28, 64) 16896
lstm_117 (LSTM) (None, 32) 12416
dropout_126 (Dropout) (None, 32) 0
dense_330 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_127"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_128 (InputLayer) [(None, 28, 1)] 0
lstm_118 (LSTM) (None, 28, 64) 16896
lstm_119 (LSTM) (None, 32) 12416
dropout_127 (Dropout) (None, 32) 0
dense_331 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 100 neuronas y dropout 0.8:
Model: "model_127"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_128 (InputLayer) [(None, 28, 1)] 0
lstm_118 (LSTM) (None, 28, 64) 16896
lstm_119 (LSTM) (None, 32) 12416
dropout_127 (Dropout) (None, 32) 0
dense_331 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_128"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_129 (InputLayer) [(None, 28, 1)] 0
lstm_120 (LSTM) (None, 28, 64) 16896
lstm_121 (LSTM) (None, 32) 12416
dropout_128 (Dropout) (None, 32) 0
dense_332 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 1000 neuronas y dropout 0.2:
Model: "model_128"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_129 (InputLayer) [(None, 28, 1)] 0
lstm_120 (LSTM) (None, 28, 64) 16896
lstm_121 (LSTM) (None, 32) 12416
dropout_128 (Dropout) (None, 32) 0
dense_332 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_129"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_130 (InputLayer) [(None, 28, 1)] 0
lstm_122 (LSTM) (None, 28, 64) 16896
lstm_123 (LSTM) (None, 32) 12416
dropout_129 (Dropout) (None, 32) 0
dense_333 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 1000 neuronas y dropout 0.4:
Model: "model_129"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_130 (InputLayer) [(None, 28, 1)] 0
lstm_122 (LSTM) (None, 28, 64) 16896
lstm_123 (LSTM) (None, 32) 12416
dropout_129 (Dropout) (None, 32) 0
dense_333 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_130"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_131 (InputLayer) [(None, 28, 1)] 0
lstm_124 (LSTM) (None, 28, 64) 16896
lstm_125 (LSTM) (None, 32) 12416
dropout_130 (Dropout) (None, 32) 0
dense_334 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 1000 neuronas y dropout 0.6:
Model: "model_130"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_131 (InputLayer) [(None, 28, 1)] 0
lstm_124 (LSTM) (None, 28, 64) 16896
lstm_125 (LSTM) (None, 32) 12416
dropout_130 (Dropout) (None, 32) 0
dense_334 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_131"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_132 (InputLayer) [(None, 28, 1)] 0
lstm_126 (LSTM) (None, 28, 64) 16896
lstm_127 (LSTM) (None, 32) 12416
dropout_131 (Dropout) (None, 32) 0
dense_335 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 1000 neuronas y dropout 0.8:
Model: "model_131"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_132 (InputLayer) [(None, 28, 1)] 0
lstm_126 (LSTM) (None, 28, 64) 16896
lstm_127 (LSTM) (None, 32) 12416
dropout_131 (Dropout) (None, 32) 0
dense_335 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_132"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_133 (InputLayer) [(None, 28, 1)] 0
lstm_128 (LSTM) (None, 28, 64) 16896
lstm_129 (LSTM) (None, 32) 12416
dropout_132 (Dropout) (None, 32) 0
dense_336 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10000 neuronas y dropout 0.2:
Model: "model_132"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_133 (InputLayer) [(None, 28, 1)] 0
lstm_128 (LSTM) (None, 28, 64) 16896
lstm_129 (LSTM) (None, 32) 12416
dropout_132 (Dropout) (None, 32) 0
dense_336 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_133"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_134 (InputLayer) [(None, 28, 1)] 0
lstm_130 (LSTM) (None, 28, 64) 16896
lstm_131 (LSTM) (None, 32) 12416
dropout_133 (Dropout) (None, 32) 0
dense_337 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10000 neuronas y dropout 0.4:
Model: "model_133"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_134 (InputLayer) [(None, 28, 1)] 0
lstm_130 (LSTM) (None, 28, 64) 16896
lstm_131 (LSTM) (None, 32) 12416
dropout_133 (Dropout) (None, 32) 0
dense_337 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_134"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_135 (InputLayer) [(None, 28, 1)] 0
lstm_132 (LSTM) (None, 28, 64) 16896
lstm_133 (LSTM) (None, 32) 12416
dropout_134 (Dropout) (None, 32) 0
dense_338 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10000 neuronas y dropout 0.6:
Model: "model_134"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_135 (InputLayer) [(None, 28, 1)] 0
lstm_132 (LSTM) (None, 28, 64) 16896
lstm_133 (LSTM) (None, 32) 12416
dropout_134 (Dropout) (None, 32) 0
dense_338 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_135"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_136 (InputLayer) [(None, 28, 1)] 0
lstm_134 (LSTM) (None, 28, 64) 16896
lstm_135 (LSTM) (None, 32) 12416
dropout_135 (Dropout) (None, 32) 0
dense_339 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10000 neuronas y dropout 0.8:
Model: "model_135"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_136 (InputLayer) [(None, 28, 1)] 0
lstm_134 (LSTM) (None, 28, 64) 16896
lstm_135 (LSTM) (None, 32) 12416
dropout_135 (Dropout) (None, 32) 0
dense_339 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Para entrenar el modelo, se utiliza la función fit() en el objeto del modelo, proporcionándole como argumentos X_train y y_train. El proceso de entrenamiento se lleva a cabo a lo largo de un número específico de épocas. Además, el parámetro batch_size especifica cuántas muestras del conjunto de entrenamiento se usarán en cada iteración de retropropagación (backpropagation).
El conjunto de datos de validación se utiliza para evaluar el rendimiento del modelo al finalizar cada época. Se emplea un objeto ModelCheckpoint que monitorea la función de pérdida en el conjunto de validación y almacena el modelo correspondiente a la época en la que esta función alcanza su valor más bajo.
Se define val_loss como el valor de la función de coste para los datos de validación cruzada y loss como el valor de la función de coste para los datos de entrenamiento. save_freq='epoch' indica que el modelo se evaluará y potencialmente se guardará después de cada época.
from tensorflow.keras.callbacks import ModelCheckpoint
# The checkpoint filename embeds epoch and validation loss so the best epoch
# can be recovered later by parsing the file name.
save_weights_at = os.path.join('keras_models', 'PRSA_data_price_LSTM28_weights.{epoch:02d}-{val_loss:.4f}.keras')
# Save the full model only when val_loss improves, evaluated once per epoch.
save_best28_lstm = ModelCheckpoint(save_weights_at, monitor='val_loss', verbose=2,
save_best_only=True, save_weights_only=False, mode='min', save_freq='epoch');
A continuación se itera sobre cada uno de los modelos del objeto models_LSTM28.
import os
from joblib import dump, load
history_price_LSTM28 = []
# Train (or reload) each candidate LSTM. Histories are cached on disk so
# re-running the notebook does not repeat the expensive fits.
for i, model in enumerate(models_LSTM28):
    filename = f'history_price_LSTM28_model_{i}.joblib'
    if os.path.exists(filename):
        # Cached history from a previous run: load it instead of retraining.
        model_history = load(filename)
        # Bug fix: the message previously printed the literal placeholder
        # '(unknown)' instead of interpolating the file name.
        print(f"El archivo '{filename}' ya existe. Se ha cargado el historial del entrenamiento.")
    else:
        model_history = model.fit(x=X_train_price_lstm_28, y=y_train_price28, batch_size=16, epochs=20,
                                  verbose=2, callbacks=[save_best28_lstm], validation_data=(X_val_price_lstm_28, y_val_price28),
                                  shuffle=True)
        # Persist only the metrics dict, not the full History object.
        dump(model_history.history, filename)
        # Bug fix: same placeholder problem as above — interpolate the file name.
        print(f"El entrenamiento del modelo {i + 1} se ha completado y el historial ha sido guardado en '{filename}'.")
    # Store the history (loaded histories are already plain dicts).
    history_price_LSTM28.append(model_history if isinstance(model_history, dict) else model_history.history)
El archivo 'history_price_LSTM28_model_0.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_price_LSTM28_model_1.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_price_LSTM28_model_2.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_price_LSTM28_model_3.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_price_LSTM28_model_4.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_price_LSTM28_model_5.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_price_LSTM28_model_6.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_price_LSTM28_model_7.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_price_LSTM28_model_8.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_price_LSTM28_model_9.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_price_LSTM28_model_10.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_price_LSTM28_model_11.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_price_LSTM28_model_12.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_price_LSTM28_model_13.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_price_LSTM28_model_14.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_price_LSTM28_model_15.joblib' ya existe. Se ha cargado el historial del entrenamiento.
En este caso, el parámetro verbose=2 indica que se mostrará una línea por cada época durante el entrenamiento. Los valores posibles son: 0 para no mostrar información, 1 para visualizar una barra de progreso y 2 para ver un registro por época.
Además, las muestras se mezclan de manera aleatoria antes de cada época, gracias a la opción shuffle=True.
Una vez completado el entrenamiento, se realizan predicciones sobre el Precio del Bitcoin utilizando los mejores modelos guardados. Las predicciones generadas, que corresponden al Precio del Bitcoin escalado, son transformadas inversamente para recuperar los valores originales. Asimismo, se evalúa la calidad del ajuste mediante el uso de métricas de medición como SSE, MAPE, MAD, MSD y R2.
import os
import re
from tensorflow.keras.models import load_model
import numpy as np
# Folder holding the saved Keras checkpoints.
model_dir = 'keras_models'
files = os.listdir(model_dir)
# Checkpoint names embed epoch and validation loss: <prefix>.<epoch>-<val_loss>.keras
pattern = r"PRSA_data_price_LSTM28_weights\.(\d+)-([\d\.]+)\.keras"
best_val_loss = float('inf')
best_model_file = None
best28_lstm = None
# Scan the directory and keep the checkpoint with the smallest recorded val_loss.
for candidate in files:
    hit = re.match(pattern, candidate)
    if hit is None:
        continue
    candidate_loss = float(hit.group(2))  # validation loss parsed from the name
    if candidate_loss < best_val_loss:
        best_val_loss = candidate_loss
        best_model_file = candidate
# Load the winning checkpoint, if one was found.
if best_model_file:
    best_model_path = os.path.join(model_dir, best_model_file)
    print(f"Cargando el mejor modelo: {best_model_file} con val_loss: {best_val_loss}")
    best28_lstm = load_model(best_model_path)
    if best28_lstm is None:
        print("Error: No se pudo cargar el mejor modelo.")
else:
    print("No se encontraron archivos de modelos que coincidan con el patrón.")
Cargando el mejor modelo: PRSA_data_price_LSTM28_weights.20-42714.5508.keras con val_loss: 42714.5508
A continuación estimamos las predicciones del modelo LSTM para la época en donde la función de pérdida alcanza su valor más bajo.
# List every checkpoint file found, so the chosen best model can be sanity-checked.
print("Archivos en el directorio 'keras_models':", files)
Archivos en el directorio 'keras_models': ['PRSA_data_vola14_MLP7_weights.04-0.0018.keras', 'PRSA_data_price_MLP21_weights.01-3579.4128.keras', 'PRSA_data_price_LSTM21_weights.01-43062.3047.keras', 'PRSA_data_price_LSTM28_weights.02-42892.2305.keras', 'PRSA_data_price_LSTM_weights.19-67016.3047.keras', 'PRSA_data_vola14_MLP21_weights.20-0.0011.keras', 'PRSA_data_price_LSTM14_weights.06-51394.8750.keras', 'PRSA_data_vola21_LSTM7_weights.04-0.0012.keras', 'PRSA_data_vola7_MLP28_weights.07-0.0033.keras', 'PRSA_data_price_LSTM21_weights.02-43052.0469.keras', 'PRSA_data_at_MLP7_weights.80-0.0780.keras', 'PRSA_data_vola21_LSTM28_weights.02-0.0022.keras', 'PRSA_data_vola21_LSTM14_weights.04-0.0011.keras', 'PRSA_data_vola7_LSTM21_weights.18-0.0029.keras', 'PRSA_data_vola21_LSTM7_weights.08-0.0013.keras', 'PRSA_data_vola28_MLP14_weights.04-0.0013.keras', 'PRSA_data_price_MLP14_weights.52-626.2623.keras', 'PRSA_data_vola7_MLP7_weights.15-0.0041.keras', 'PRSA_data_price_LSTM14_weights.13-51325.8516.keras', 'PRSA_data_vola14_MLP7_weights.02-0.0023.keras', 'PRSA_data_price_LSTM21_weights.11-42962.6562.keras', 'PRSA_data_vola7_LSTM28_weights.10-0.0037.keras', 'PRSA_data_at_LSTM_weights.04-1.2071.keras', 'PRSA_data_price_MLP7_weights.50-1342.5273.keras', 'PRSA_data_price_MLP14_weights.03-5108.7837.keras', 'PRSA_data_vola28_MLP7_weights.14-0.0012.keras', 'PRSA_data_at_MLP7_weights.11-2.4883.keras', 'PRSA_data_price_LSTM28_weights.08-42832.5430.keras', 'PRSA_data_price_MLP28_weights.01-9242.6553.keras', 'PRSA_data_vola21_LSTM28_weights.04-0.0019.keras', 'PRSA_data_price_MLP21_weights.16-1013.2251.keras', 'PRSA_data_price_MLP28_weights.13-1042.5178.keras', 'PRSA_data_vola28_MLP28_weights.34-0.0006.keras', 'PRSA_data_at_MLP7_weights.01-19.6635.keras', 'PRSA_data_at_MLP7_weights.19-1.8379.keras', 'PRSA_data_at_MLP7_weights.12-2.3991.keras', 'PRSA_data_price_MLP7_weights.15-1346.3275.keras', 'PRSA_data_vola14_MLP28_weights.02-0.0024.keras', 
'PRSA_data_vola14_MLP28_weights.07-0.0017.keras', 'PRSA_data_at_MLP28_weights.05-0.0930.keras', 'PRSA_data_price_LSTM14_weights.20-51256.7266.keras', 'PRSA_data_vola21_LSTM21_weights.05-0.0013.keras', 'PRSA_data_price_MLP14_weights.12-1304.7009.keras', 'PRSA_data_vola7_LSTM14_weights.09-0.0044.keras', 'PRSA_data_vola28_MLP28_weights.50-0.0006.keras', 'PRSA_data_vola7_MLP14_weights.15-0.0020.keras', 'PRSA_data_price_MLP14_weights.08-1386.5365.keras', 'PRSA_data_price_LSTM21_weights.09-42982.2383.keras', 'PRSA_data_price_MLP28_weights.11-7366.8994.keras', 'PRSA_data_vola14_MLP7_weights.18-0.0015.keras', 'PRSA_data_price_MLP7_weights.14-1734.7238.keras', 'PRSA_data_price_LSTM14_weights.15-51306.2070.keras', 'PRSA_data_vola21_MLP28_weights.25-0.0010.keras', 'PRSA_data_at_LSTM14_weights.02-2.2175.keras', 'PRSA_data_vola28_LSTM7_weights.08-0.0014.keras', 'PRSA_data_price_MLP28_weights.10-1107.4047.keras', 'PRSA_data_vola28_MLP7_weights.01-0.0068.keras', 'PRSA_data_vola21_LSTM21_weights.03-0.0015.keras', 'PRSA_data_price_MLP7_weights.01-9253.3047.keras', 'PRSA_data_vola21_MLP14_weights.31-0.0007.keras', 'PRSA_data_price_LSTM28_weights.12-42793.6172.keras', 'PRSA_data_price_LSTM14_weights.09-51365.0000.keras', 'PRSA_data_vola28_MLP28_weights.42-0.0006.keras', 'PRSA_data_price_MLP7_weights.05-4792.5054.keras', 'PRSA_data_vola21_LSTM_weights.01-0.0024.keras', 'PRSA_data_price_LSTM28_weights.17-42745.0352.keras', 'PRSA_data_vola28_MLP28_weights.45-0.0006.keras', 'PRSA_data_vola28_MLP14_weights.08-0.0010.keras', 'PRSA_data_at_LSTM_weights.11-0.6498.keras', 'PRSA_data_vola28_MLP28_weights.39-0.0007.keras', 'PRSA_data_at_MLP21_weights.11-0.0326.keras', 'PRSA_data_price_LSTM_weights.20-67006.4141.keras', 'PRSA_data_vola28_MLP7_weights.06-0.0014.keras', 'PRSA_data_at_LSTM14_weights.06-0.8145.keras', 'PRSA_data_at_MLP28_weights.01-2.1902.keras', 'PRSA_data_price_MLP14_weights.46-814.1992.keras', '.DS_Store', 'PRSA_data_vola28_LSTM28_weights.12-0.0006.keras', 
'PRSA_data_price_LSTM14_weights.18-51276.5625.keras', 'PRSA_data_vola21_LSTM14_weights.01-0.0078.keras', 'PRSA_data_vola14_MLP21_weights.04-0.0015.keras', 'PRSA_data_vola21_LSTM7_weights.02-0.0012.keras', 'PRSA_data_vola21_LSTM14_weights.02-0.0011.keras', 'PRSA_data_at_MLP7_weights.09-2.9793.keras', 'PRSA_data_vola21_MLP14_weights.06-0.0007.keras', 'PRSA_data_vola14_MLP21_weights.78-0.0010.keras', 'PRSA_data_price_MLP14_weights.64-711.9107.keras', 'PRSA_data_vola14_MLP28_weights.31-0.0011.keras', 'PRSA_data_vola14_LSTM_weights.08-0.0020.keras', 'PRSA_data_vola14_LSTM21_weights.02-0.0018.keras', 'PRSA_data_vola14_LSTM28_weights.17-0.0014.keras', 'PRSA_data_vola14_MLP14_weights.29-0.0005.keras', 'PRSA_data_price_LSTM_weights.03-67175.7734.keras', 'PRSA_data_vola14_LSTM_weights.01-0.0068.keras', 'PRSA_data_price_LSTM28_weights.11-42803.3086.keras', 'PRSA_data_price_MLP28_weights.15-7107.3306.keras', 'PRSA_data_vola14_MLP21_weights.49-0.0010.keras', 'PRSA_data_vola28_MLP7_weights.07-0.0010.keras', 'PRSA_data_vola21_LSTM7_weights.02-0.0010.keras', 'PRSA_data_at_LSTM14_weights.15-0.4082.keras', 'PRSA_data_vola28_MLP7_weights.19-0.0011.keras', 'PRSA_data_vola14_MLP14_weights.06-0.0006.keras', 'PRSA_data_vola14_LSTM21_weights.06-0.0012.keras', 'PRSA_data_price_LSTM_weights.17-67036.2891.keras', 'PRSA_data_vola28_MLP7_weights.03-0.0018.keras', 'PRSA_data_vola7_MLP28_weights.02-0.0039.keras', 'PRSA_data_vola7_LSTM28_weights.16-0.0035.keras', 'PRSA_data_vola21_MLP7_weights.20-0.0011.keras', 'PRSA_data_vola21_LSTM7_weights.01-0.0018.keras', 'PRSA_data_vola21_MLP21_weights.33-0.0009.keras', 'PRSA_data_price_MLP21_weights.64-870.7020.keras', 'PRSA_data_vola21_MLP14_weights.22-0.0007.keras', 'PRSA_data_vola7_MLP14_weights.14-0.0020.keras', 'PRSA_data_price_LSTM21_weights.17-42903.9648.keras', 'PRSA_data_vola21_MLP14_weights.01-0.0021.keras', 'PRSA_data_vola21_LSTM7_weights.16-0.0011.keras', 'PRSA_data_at_LSTM14_weights.11-0.6656.keras', 
'PRSA_data_vola28_MLP7_weights.02-0.0019.keras', 'PRSA_data_vola14_MLP28_weights.05-0.0018.keras', 'PRSA_data_vola14_LSTM14_weights.08-0.0005.keras', 'PRSA_data_at_MLP21_weights.02-0.8171.keras', 'PRSA_data_price_MLP28_weights.02-8134.2202.keras', 'PRSA_data_price_LSTM28_weights.06-42852.3320.keras', 'PRSA_data_vola7_MLP14_weights.02-0.0036.keras', 'PRSA_data_vola14_LSTM28_weights.01-0.0026.keras', 'PRSA_data_at_MLP7_weights.04-9.8837.keras', 'PRSA_data_price_MLP28_weights.16-1274.4368.keras', 'PRSA_data_price_LSTM21_weights.07-43002.0078.keras', 'PRSA_data_vola7_MLP28_weights.23-0.0032.keras', 'PRSA_data_price_LSTM_weights.01-67196.3359.keras', 'PRSA_data_at_MLP28_weights.02-1.9156.keras', 'PRSA_data_vola14_MLP21_weights.02-0.0016.keras', 'PRSA_data_vola28_MLP14_weights.14-0.0007.keras', 'PRSA_data_price_LSTM28_weights.07-42842.4375.keras', 'PRSA_data_price_MLP14_weights.37-825.1805.keras', 'PRSA_data_vola7_MLP14_weights.07-0.0021.keras', 'PRSA_data_price_LSTM14_weights.04-51414.8945.keras', 'PRSA_data_price_MLP7_weights.42-1446.1339.keras', 'PRSA_data_price_MLP21_weights.57-932.6497.keras', 'PRSA_data_price_LSTM14_weights.19-51266.6133.keras', 'PRSA_data_vola21_MLP14_weights.34-0.0008.keras', 'PRSA_data_price_MLP14_weights.01-10485.8262.keras', 'PRSA_data_vola21_LSTM7_weights.08-0.0012.keras', 'PRSA_data_at_LSTM_weights.02-2.4966.keras', 'PRSA_data_vola21_LSTM28_weights.15-0.0009.keras', 'PRSA_data_price_LSTM_weights.20-67006.3047.keras', 'PRSA_data_price_LSTM_weights.14-67066.1016.keras', 'PRSA_data_price_LSTM21_weights.19-42884.2422.keras', 'PRSA_data_vola28_LSTM21_weights.18-0.0008.keras', 'PRSA_data_vola21_LSTM7_weights.04-0.0013.keras', 'PRSA_data_price_LSTM28_weights.20-42714.5508.keras', 'PRSA_data_at_LSTM14_weights.13-0.5609.keras', 'PRSA_data_vola28_LSTM28_weights.04-0.0009.keras', 'PRSA_data_vola14_MLP21_weights.01-0.0118.keras', 'PRSA_data_vola7_MLP21_weights.02-0.0032.keras', 'PRSA_data_at_MLP7_weights.74-0.2838.keras', 
'PRSA_data_price_MLP28_weights.17-1217.0461.keras', 'PRSA_data_at_MLP21_weights.16-1.1973.keras', 'PRSA_data_vola7_MLP7_weights.02-0.0036.keras', 'PRSA_data_vola21_LSTM7_weights.02-0.0015.keras', 'PRSA_data_vola14_LSTM14_weights.04-0.0006.keras', 'PRSA_data_vola28_LSTM28_weights.18-0.0006.keras', 'PRSA_data_price_MLP21_weights.74-805.9489.keras', 'PRSA_data_vola21_MLP14_weights.02-0.0011.keras', 'PRSA_data_vola28_MLP21_weights.02-0.0012.keras', 'PRSA_data_vola21_LSTM_weights.11-0.0014.keras', 'PRSA_data_vola7_LSTM14_weights.14-0.0036.keras', 'PRSA_data_at_MLP28_weights.03-1.7712.keras', 'PRSA_data_price_LSTM21_weights.20-42874.3555.keras', 'PRSA_data_vola28_LSTM7_weights.20-0.0010.keras', 'PRSA_data_vola28_MLP21_weights.04-0.0009.keras', 'PRSA_data_at_MLP7_weights.08-3.6716.keras', 'PRSA_data_at_MLP21_weights.04-0.0734.keras', 'PRSA_data_vola21_LSTM28_weights.03-0.0021.keras', 'PRSA_data_vola21_MLP14_weights.35-0.0008.keras', 'PRSA_data_vola28_LSTM7_weights.11-0.0012.keras', 'PRSA_data_price_LSTM28_weights.04-42872.1953.keras', 'PRSA_data_at_LSTM_weights.08-0.4803.keras', 'PRSA_data_vola28_MLP14_weights.05-0.0010.keras', 'PRSA_data_price_LSTM14_weights.14-51316.0625.keras', 'PRSA_data_vola7_MLP14_weights.01-0.0101.keras', 'PRSA_data_vola14_LSTM28_weights.17-0.0015.keras', 'PRSA_data_vola7_LSTM_weights.01-0.0040.keras', 'PRSA_data_vola14_MLP21_weights.09-0.0011.keras', 'PRSA_data_price_LSTM21_weights.05-43021.9570.keras', 'PRSA_data_vola28_MLP28_weights.03-0.0007.keras', 'PRSA_data_vola7_LSTM14_weights.10-0.0042.keras', 'PRSA_data_price_LSTM_weights.07-67135.3359.keras', 'PRSA_data_vola7_MLP14_weights.18-0.0020.keras', 'PRSA_data_price_LSTM21_weights.18-42894.1055.keras', 'PRSA_data_vola7_MLP7_weights.01-0.0042.keras', 'PRSA_data_vola14_LSTM_weights.05-0.0024.keras', 'PRSA_data_vola21_MLP14_weights.27-0.0008.keras', 'PRSA_data_vola28_MLP28_weights.02-0.0018.keras', 'PRSA_data_vola21_LSTM7_weights.03-0.0017.keras', 'PRSA_data_price_LSTM_weights.10-67105.4297.keras', 
'PRSA_data_vola28_MLP7_weights.19-0.0010.keras', 'PRSA_data_vola7_MLP21_weights.03-0.0030.keras', 'PRSA_data_at_MLP7_weights.06-5.3724.keras', 'PRSA_data_at_LSTM_weights.05-1.1226.keras', 'PRSA_data_vola21_LSTM7_weights.01-0.0026.keras', 'PRSA_data_vola21_LSTM28_weights.05-0.0018.keras', 'PRSA_data_vola14_MLP21_weights.05-0.0012.keras', 'PRSA_data_vola21_LSTM28_weights.14-0.0010.keras', 'PRSA_data_vola21_MLP14_weights.08-0.0010.keras', 'PRSA_data_vola21_MLP28_weights.47-0.0008.keras', 'PRSA_data_price_LSTM14_weights.16-51296.3633.keras', 'PRSA_data_at_MLP7_weights.02-16.3421.keras', 'PRSA_data_vola7_LSTM21_weights.15-0.0030.keras', 'PRSA_data_vola21_MLP21_weights.09-0.0009.keras', 'PRSA_data_at_LSTM14_weights.01-6.2168.keras', 'PRSA_data_vola28_LSTM21_weights.01-0.0016.keras', 'PRSA_data_vola21_LSTM21_weights.16-0.0009.keras', 'PRSA_data_vola21_LSTM28_weights.07-0.0016.keras', 'PRSA_data_vola21_MLP28_weights.19-0.0010.keras', 'PRSA_data_vola21_LSTM28_weights.14-0.0009.keras', 'PRSA_data_vola14_MLP7_weights.09-0.0018.keras', 'PRSA_data_vola21_MLP21_weights.02-0.0011.keras', 'PRSA_data_vola28_LSTM14_weights.03-0.0007.keras', 'PRSA_data_vola21_LSTM_weights.09-0.0015.keras', 'PRSA_data_vola21_LSTM7_weights.09-0.0012.keras', 'PRSA_data_vola28_MLP21_weights.65-0.0007.keras', 'PRSA_data_vola21_MLP28_weights.15-0.0011.keras', 'PRSA_data_at_MLP7_weights.42-0.8694.keras', 'PRSA_data_vola21_LSTM28_weights.18-0.0008.keras', 'PRSA_data_vola21_MLP14_weights.04-0.0008.keras', 'PRSA_data_vola7_LSTM14_weights.02-0.0040.keras', 'PRSA_data_vola21_LSTM14_weights.12-0.0007.keras', 'PRSA_data_vola7_LSTM21_weights.15-0.0029.keras', 'PRSA_data_vola21_MLP28_weights.01-0.0026.keras', 'PRSA_data_price_LSTM_weights.13-67075.9453.keras', 'PRSA_data_vola14_MLP7_weights.01-0.0041.keras', 'PRSA_data_price_MLP21_weights.02-1476.1034.keras', 'PRSA_data_vola14_LSTM28_weights.19-0.0014.keras', 'PRSA_data_price_MLP14_weights.96-663.0834.keras', 'PRSA_data_vola7_MLP21_weights.94-0.0024.keras', 
'PRSA_data_price_LSTM21_weights.03-43041.9570.keras', 'PRSA_data_price_MLP7_weights.12-3798.9531.keras', 'PRSA_data_price_LSTM28_weights.10-42813.0352.keras', 'PRSA_data_vola14_MLP28_weights.20-0.0014.keras', 'PRSA_data_vola7_MLP21_weights.04-0.0027.keras', 'PRSA_data_vola7_LSTM28_weights.18-0.0035.keras', 'PRSA_data_at_MLP7_weights.10-2.7632.keras', 'PRSA_data_price_LSTM_weights.06-67145.4141.keras', 'PRSA_data_price_LSTM28_weights.13-42783.9805.keras', 'PRSA_data_vola21_MLP7_weights.13-0.0012.keras', 'PRSA_data_price_LSTM28_weights.01-42902.4570.keras', 'PRSA_data_price_LSTM_weights.20-67005.9609.keras', 'PRSA_data_vola7_LSTM21_weights.04-0.0034.keras', 'PRSA_data_at_MLP14_weights.03-0.0461.keras', 'PRSA_data_vola28_MLP28_weights.01-0.0042.keras', 'PRSA_data_vola7_LSTM14_weights.01-0.0455.keras', 'PRSA_data_price_MLP28_weights.17-1182.9105.keras', 'PRSA_data_vola7_MLP14_weights.05-0.0018.keras', 'PRSA_data_vola28_MLP14_weights.38-0.0006.keras', 'PRSA_data_vola28_MLP7_weights.10-0.0013.keras', 'PRSA_data_vola7_LSTM21_weights.01-0.0038.keras', 'PRSA_data_price_LSTM14_weights.17-51286.4570.keras', 'PRSA_data_price_LSTM28_weights.16-42754.8008.keras', 'PRSA_data_price_LSTM14_weights.11-51345.4023.keras', 'PRSA_data_price_MLP14_weights.22-638.4409.keras', 'PRSA_data_vola7_MLP28_weights.32-0.0029.keras', 'PRSA_data_price_MLP7_weights.48-1648.1864.keras', 'PRSA_data_vola28_LSTM28_weights.10-0.0007.keras', 'PRSA_data_vola21_LSTM_weights.03-0.0021.keras', 'PRSA_data_vola21_LSTM21_weights.13-0.0010.keras', 'PRSA_data_at_MLP7_weights.07-4.2627.keras', 'PRSA_data_vola7_LSTM21_weights.17-0.0029.keras', 'PRSA_data_vola21_LSTM28_weights.08-0.0011.keras', 'PRSA_data_price_MLP21_weights.05-1072.5081.keras', 'PRSA_data_vola7_LSTM21_weights.05-0.0030.keras', 'PRSA_data_price_MLP14_weights.29-856.8320.keras', 'PRSA_data_vola14_LSTM21_weights.11-0.0011.keras', 'PRSA_data_price_LSTM21_weights.10-42972.4219.keras', 'PRSA_data_price_LSTM_weights.02-67185.9453.keras', 
'PRSA_data_vola28_MLP14_weights.21-0.0007.keras', 'PRSA_data_vola21_LSTM28_weights.16-0.0009.keras', 'PRSA_data_vola28_LSTM21_weights.10-0.0009.keras', 'PRSA_data_price_LSTM28_weights.15-42764.5898.keras', 'PRSA_data_at_LSTM14_weights.07-0.7059.keras', 'PRSA_data_vola21_LSTM21_weights.01-0.0048.keras', 'PRSA_data_at_LSTM14_weights.03-1.3422.keras', 'PRSA_data_vola14_LSTM14_weights.02-0.0015.keras', 'PRSA_data_vola14_MLP14_weights.02-0.0007.keras', 'PRSA_data_vola7_LSTM21_weights.02-0.0034.keras', 'PRSA_data_vola7_MLP21_weights.21-0.0025.keras', 'PRSA_data_at_MLP7_weights.18-1.8396.keras', 'PRSA_data_price_LSTM_weights.11-67095.6172.keras', 'PRSA_data_at_LSTM14_weights.12-0.6378.keras', 'PRSA_data_vola21_LSTM21_weights.07-0.0012.keras', 'PRSA_data_vola7_MLP28_weights.45-0.0030.keras', 'PRSA_data_at_MLP7_weights.26-0.8897.keras', 'PRSA_data_vola21_MLP7_weights.01-0.0014.keras', 'PRSA_data_price_MLP14_weights.19-982.5739.keras', 'PRSA_data_vola14_LSTM_weights.16-0.0017.keras', 'PRSA_data_price_MLP21_weights.94-813.8954.keras', 'PRSA_data_vola7_LSTM21_weights.20-0.0029.keras', 'PRSA_data_at_LSTM_weights.03-1.4842.keras', 'PRSA_data_vola21_MLP28_weights.02-0.0015.keras', 'PRSA_data_price_LSTM21_weights.12-42952.9453.keras', 'PRSA_data_price_MLP28_weights.17-1019.2816.keras', 'PRSA_data_vola14_LSTM_weights.13-0.0019.keras', 'PRSA_data_vola7_LSTM_weights.02-0.0038.keras', 'PRSA_data_vola7_LSTM28_weights.02-0.0042.keras', 'PRSA_data_at_LSTM14_weights.13-0.2270.keras', 'PRSA_data_price_LSTM_weights.18-67026.2891.keras', 'PRSA_data_vola28_MLP21_weights.66-0.0007.keras', 'PRSA_data_at_MLP14_weights.42-0.0373.keras', 'PRSA_data_vola7_MLP14_weights.38-0.0020.keras', 'PRSA_data_vola7_LSTM21_weights.11-0.0029.keras', 'PRSA_data_vola21_LSTM21_weights.12-0.0010.keras', 'PRSA_data_at_LSTM_weights.14-0.5158.keras', 'PRSA_data_vola14_LSTM21_weights.01-0.0019.keras', 'PRSA_data_vola28_LSTM21_weights.11-0.0009.keras', 'PRSA_data_vola7_LSTM28_weights.05-0.0038.keras', 
'PRSA_data_vola14_MLP14_weights.10-0.0005.keras', 'PRSA_data_at_MLP21_weights.03-2.1540.keras', 'PRSA_data_at_LSTM_weights.16-0.5377.keras', 'PRSA_data_vola21_LSTM7_weights.13-0.0013.keras', 'PRSA_data_vola21_MLP28_weights.44-0.0008.keras', 'PRSA_data_at_MLP7_weights.16-1.9941.keras', 'PRSA_data_vola28_LSTM28_weights.01-0.0010.keras', 'PRSA_data_vola28_LSTM14_weights.19-0.0006.keras', 'PRSA_data_price_LSTM21_weights.14-42933.4180.keras', 'PRSA_data_vola14_LSTM28_weights.04-0.0019.keras', 'PRSA_data_price_MLP21_weights.82-828.7976.keras', 'PRSA_data_vola28_MLP21_weights.01-0.0030.keras', 'PRSA_data_price_LSTM14_weights.10-51355.2070.keras', 'PRSA_data_vola21_MLP21_weights.18-0.0010.keras', 'PRSA_data_vola28_MLP21_weights.73-0.0007.keras', 'PRSA_data_vola28_MLP7_weights.20-0.0011.keras', 'PRSA_data_price_LSTM21_weights.13-42943.1719.keras', 'PRSA_data_vola28_MLP28_weights.36-0.0007.keras', 'PRSA_data_vola7_MLP14_weights.04-0.0025.keras', 'PRSA_data_at_MLP14_weights.02-0.2715.keras', 'PRSA_data_price_LSTM14_weights.05-51404.8945.keras', 'PRSA_data_price_LSTM28_weights.19-42725.4297.keras', 'PRSA_data_price_MLP28_weights.01-1561.6656.keras', 'PRSA_data_vola21_MLP7_weights.14-0.0011.keras', 'PRSA_data_vola7_MLP7_weights.23-0.0033.keras', 'PRSA_data_vola14_MLP28_weights.01-0.0026.keras', 'PRSA_data_at_MLP14_weights.29-0.0323.keras', 'PRSA_data_vola7_MLP21_weights.01-0.0033.keras', 'PRSA_data_vola21_LSTM_weights.19-0.0014.keras', 'PRSA_data_price_MLP7_weights.03-5953.8076.keras', 'PRSA_data_price_MLP21_weights.42-948.6628.keras', 'PRSA_data_vola21_LSTM14_weights.07-0.0008.keras', 'PRSA_data_vola14_LSTM28_weights.05-0.0018.keras', 'PRSA_data_vola7_LSTM28_weights.13-0.0037.keras', 'PRSA_data_vola21_MLP21_weights.01-0.0016.keras', 'PRSA_data_vola14_LSTM21_weights.08-0.0011.keras', 'PRSA_data_price_LSTM21_weights.06-43011.9570.keras', 'PRSA_data_vola7_MLP14_weights.05-0.0019.keras', 'PRSA_data_vola7_MLP28_weights.16-0.0033.keras', 
'PRSA_data_vola21_LSTM28_weights.16-0.0008.keras', 'PRSA_data_vola28_MLP14_weights.01-0.0031.keras', 'PRSA_data_vola14_MLP28_weights.11-0.0015.keras', 'PRSA_data_price_LSTM14_weights.03-51425.0000.keras', 'PRSA_data_vola7_LSTM28_weights.04-0.0039.keras', 'PRSA_data_vola28_LSTM7_weights.13-0.0011.keras', 'PRSA_data_price_LSTM14_weights.01-51445.4883.keras', 'PRSA_data_vola28_MLP28_weights.22-0.0006.keras', 'PRSA_data_price_LSTM_weights.09-67115.3359.keras', 'PRSA_data_vola28_LSTM7_weights.14-0.0011.keras', 'PRSA_data_vola7_LSTM14_weights.04-0.0050.keras', 'PRSA_data_vola21_MLP28_weights.17-0.0010.keras', 'PRSA_data_vola28_LSTM21_weights.09-0.0010.keras', 'PRSA_data_vola21_MLP14_weights.13-0.0010.keras', 'PRSA_data_vola14_MLP14_weights.01-0.0013.keras', 'PRSA_data_vola21_MLP28_weights.50-0.0009.keras', 'PRSA_data_at_LSTM_weights.19-0.2184.keras', 'PRSA_data_vola14_LSTM_weights.02-0.0030.keras', 'PRSA_data_vola7_MLP28_weights.39-0.0030.keras', 'PRSA_data_price_LSTM21_weights.04-43031.9414.keras', 'PRSA_data_price_LSTM_weights.15-67056.2109.keras', 'PRSA_data_at_MLP7_weights.20-1.6814.keras', 'PRSA_data_vola28_LSTM7_weights.18-0.0010.keras', 'PRSA_data_at_MLP7_weights.95-0.2130.keras', 'PRSA_data_vola14_LSTM28_weights.03-0.0021.keras', 'PRSA_data_at_LSTM_weights.01-7.5653.keras', 'PRSA_data_at_MLP7_weights.05-7.2110.keras', 'PRSA_data_vola21_LSTM28_weights.20-0.0008.keras', 'PRSA_data_at_LSTM_weights.07-0.8569.keras', 'PRSA_data_vola14_LSTM28_weights.12-0.0014.keras', 'PRSA_data_price_LSTM21_weights.16-42913.7969.keras', 'PRSA_data_at_MLP28_weights.04-1.3772.keras', 'PRSA_data_price_LSTM_weights.08-67125.3047.keras', 'PRSA_data_price_LSTM14_weights.07-51384.8633.keras', 'PRSA_data_price_LSTM_weights.04-67165.6016.keras', 'PRSA_data_vola14_LSTM28_weights.15-0.0014.keras', 'PRSA_data_at_MLP14_weights.01-0.4195.keras', 'PRSA_data_price_LSTM28_weights.09-42822.7578.keras', 'PRSA_data_price_LSTM28_weights.03-42882.1953.keras', 'PRSA_data_at_LSTM14_weights.05-0.8408.keras', 
'PRSA_data_vola7_MLP28_weights.08-0.0030.keras', 'PRSA_data_price_MLP14_weights.07-3004.8474.keras', 'PRSA_data_price_LSTM21_weights.15-42923.6367.keras', 'PRSA_data_vola7_MLP21_weights.81-0.0025.keras', 'PRSA_data_price_MLP21_weights.86-819.7106.keras', 'PRSA_data_vola28_MLP7_weights.10-0.0010.keras', 'PRSA_data_vola21_MLP28_weights.05-0.0012.keras', 'PRSA_data_vola21_MLP14_weights.20-0.0007.keras', 'PRSA_data_vola14_MLP28_weights.42-0.0011.keras', 'PRSA_data_price_LSTM_weights.12-67085.7578.keras', 'PRSA_data_vola28_MLP21_weights.13-0.0008.keras', 'PRSA_data_vola14_MLP28_weights.08-0.0016.keras', 'PRSA_data_at_MLP7_weights.86-0.0883.keras', 'PRSA_data_vola28_LSTM14_weights.01-0.0020.keras', 'PRSA_data_vola21_MLP21_weights.71-0.0010.keras', 'PRSA_data_at_MLP28_weights.09-0.1537.keras', 'PRSA_data_vola7_MLP28_weights.48-0.0030.keras', 'PRSA_data_at_MLP28_weights.12-0.0354.keras', 'PRSA_data_at_LSTM14_weights.04-0.9732.keras', 'PRSA_data_at_LSTM14_weights.15-0.2154.keras', 'PRSA_data_vola14_MLP28_weights.21-0.0013.keras', 'PRSA_data_vola21_MLP14_weights.25-0.0009.keras', 'PRSA_data_vola14_LSTM14_weights.01-0.0027.keras', 'PRSA_data_vola21_LSTM28_weights.01-0.0039.keras', 'PRSA_data_price_MLP14_weights.02-8059.2510.keras', 'PRSA_data_vola21_MLP7_weights.13-0.0013.keras', 'PRSA_data_vola28_MLP7_weights.04-0.0016.keras', 'PRSA_data_vola21_LSTM21_weights.02-0.0043.keras', 'PRSA_data_price_MLP14_weights.78-683.9230.keras', 'PRSA_data_price_MLP21_weights.19-1002.5925.keras', 'PRSA_data_vola28_MLP21_weights.92-0.0007.keras', 'PRSA_data_price_LSTM28_weights.14-42774.2500.keras', 'PRSA_data_vola21_MLP21_weights.06-0.0010.keras', 'PRSA_data_vola14_LSTM14_weights.03-0.0010.keras', 'PRSA_data_price_LSTM_weights.16-67046.2422.keras', 'PRSA_data_vola7_LSTM28_weights.07-0.0037.keras', 'PRSA_data_vola28_LSTM7_weights.01-0.0017.keras', 'PRSA_data_vola28_LSTM14_weights.02-0.0008.keras', 'PRSA_data_vola7_MLP14_weights.05-0.0022.keras', 'PRSA_data_at_MLP21_weights.03-0.2039.keras', 
'PRSA_data_vola14_LSTM28_weights.02-0.0023.keras', 'PRSA_data_at_MLP7_weights.03-13.2212.keras', 'PRSA_data_vola7_LSTM14_weights.04-0.0037.keras', 'PRSA_data_vola14_MLP21_weights.69-0.0010.keras', 'PRSA_data_vola21_LSTM7_weights.18-0.0013.keras', 'PRSA_data_vola7_MLP21_weights.49-0.0025.keras', 'PRSA_data_vola28_MLP28_weights.31-0.0006.keras', 'PRSA_data_price_MLP21_weights.77-841.5739.keras', 'PRSA_data_vola28_MLP14_weights.35-0.0006.keras', 'PRSA_data_vola7_MLP28_weights.26-0.0031.keras', 'PRSA_data_vola7_LSTM14_weights.01-0.0065.keras', 'PRSA_data_price_LSTM14_weights.02-51435.1133.keras', 'PRSA_data_price_LSTM28_weights.20-42714.7695.keras', 'PRSA_data_vola28_LSTM28_weights.16-0.0006.keras', 'PRSA_data_vola14_LSTM28_weights.06-0.0016.keras', 'PRSA_data_vola21_MLP21_weights.13-0.0010.keras', 'PRSA_data_vola28_MLP14_weights.32-0.0006.keras', 'PRSA_data_vola7_MLP28_weights.01-0.0043.keras', 'PRSA_data_vola21_MLP28_weights.08-0.0011.keras', 'PRSA_data_vola21_LSTM28_weights.10-0.0011.keras', 'PRSA_data_vola21_LSTM7_weights.14-0.0012.keras', 'PRSA_data_vola7_LSTM28_weights.01-0.0049.keras', 'PRSA_data_price_LSTM28_weights.20-42715.6250.keras', 'PRSA_data_vola21_LSTM7_weights.13-0.0012.keras', 'PRSA_data_price_LSTM_weights.05-67155.5078.keras', 'PRSA_data_price_MLP14_weights.14-1073.6493.keras', 'PRSA_data_vola14_LSTM_weights.03-0.0029.keras', 'PRSA_data_vola14_MLP7_weights.07-0.0021.keras', 'PRSA_data_at_MLP28_weights.03-0.0637.keras', 'PRSA_data_vola21_MLP28_weights.03-0.0012.keras', 'PRSA_data_at_MLP28_weights.05-0.1823.keras', 'PRSA_data_at_MLP21_weights.01-0.9762.keras', 'PRSA_data_vola7_LSTM28_weights.15-0.0035.keras', 'PRSA_data_price_LSTM21_weights.08-42992.0703.keras', 'PRSA_data_vola21_MLP21_weights.22-0.0010.keras', 'PRSA_data_vola28_LSTM28_weights.20-0.0006.keras', 'PRSA_data_at_MLP21_weights.01-4.3241.keras', 'PRSA_data_at_LSTM_weights.06-0.8671.keras', 'PRSA_data_price_MLP7_weights.37-1654.4709.keras', 'PRSA_data_at_LSTM_weights.20-0.4626.keras', 
'PRSA_data_vola21_MLP28_weights.27-0.0009.keras', 'PRSA_data_at_LSTM_weights.19-0.4729.keras', 'PRSA_data_at_MLP7_weights.24-1.4718.keras', 'PRSA_data_at_MLP7_weights.59-0.4636.keras', 'PRSA_data_vola14_LSTM14_weights.06-0.0005.keras', 'PRSA_data_at_MLP7_weights.14-2.2322.keras', 'PRSA_data_vola7_LSTM14_weights.02-0.0196.keras', 'PRSA_data_at_LSTM14_weights.19-0.3356.keras', 'PRSA_data_price_LSTM28_weights.18-42735.2305.keras', 'PRSA_data_price_LSTM14_weights.12-51335.6523.keras', 'PRSA_data_vola21_LSTM_weights.06-0.0017.keras', 'PRSA_data_price_LSTM28_weights.05-42862.2383.keras', 'PRSA_data_price_MLP7_weights.10-1248.9592.keras', 'PRSA_data_vola21_MLP28_weights.35-0.0009.keras', 'PRSA_data_vola28_LSTM21_weights.03-0.0013.keras', 'PRSA_data_price_MLP14_weights.04-5028.7607.keras', 'PRSA_data_price_LSTM14_weights.08-51374.8945.keras', 'PRSA_data_vola28_MLP14_weights.48-0.0007.keras']
# Generate predictions with the best LSTM (horizon = 28) checkpoint, if one loaded.
if best28_lstm is None:
    print("No se puede realizar predicciones porque el mejor modelo no se ha cargado.")
else:
    # Predict on each split and drop singleton dimensions in a single pass;
    # the generator is consumed left-to-right, preserving the original call order.
    train_preds_price_LSTM28, val_preds_price_LSTM28, test_preds_price_LSTM28 = (
        np.squeeze(best28_lstm.predict(features))
        for features in (X_train_price_lstm_28, X_val_price_lstm_28, X_test_price_lstm_28)
    )
    # Show the (inverse-scaled later) raw model outputs per split.
    print("Predicciones de Entrenamiento:", train_preds_price_LSTM28)
    print("Predicciones de validación:", val_preds_price_LSTM28)
    print("Predicciones de prueba:", test_preds_price_LSTM28)
153/153 [==============================] - 1s 2ms/step
1/1 [==============================] - 0s 9ms/step
1/1 [==============================] - 0s 9ms/step
Predicciones de Entrenamiento: [-3.0216217e-02 -3.0216217e-02 -3.0216217e-02 ... 2.0502954e+02
2.0502954e+02 2.0502954e+02]
Predicciones de validación: [205.02954 205.02954 205.02954 205.02954 205.02954 205.02954 205.02954
205.02954 205.02954 205.02954 205.02954 205.02954 205.02954 205.02954
205.02954 205.02954 205.02954 205.02954 205.02954 205.02954 205.02954
205.02954 205.02954 205.02954 205.02954 205.02954 205.02954 205.02954]
Predicciones de prueba: [205.02954 205.02954 205.02954 205.02954 205.02954 205.02954 205.02954
205.02954 205.02954 205.02954 205.02954 205.02954 205.02954 205.02954
205.02954 205.02954 205.02954 205.02954 205.02954 205.02954 205.02954
205.02954 205.02954 205.02954 205.02954 205.02954 205.02954 205.02954]
# Plot the last 100 training points plus the validation/test series against the LSTM-28 predictions.
plot_model(data_train_plot_price28[-100:], data_val_plot_price28, data_test_plot_price28, val_preds_price_LSTM28, test_preds_price_LSTM28, "Predicciones usando Memoria a Corto y Largo Paso (LSTM) para un horizonte de 28 días")
A continuación realizamos un análisis de los residuales y del rendimiento de nuestro modelo con relación a los conjuntos de entrenamiento (rendimiento) y prueba (rendimiento y residuales).
Conjunto de datos de Entrenamiento
A continuación se realiza el análisis de los residuales para el conjunto de entrenamiento.
# Residual diagnostics on the training split: Ljung-Box (autocorrelation) and Jarque-Bera (normality) p-values.
ljung_box_pval_LSTM_train28, jarque_bera_pval_LSTM_train28 = diagnostic_plots(y_train_price28, train_preds_price_LSTM28)
Ljung-Box LB Statistic: 4875.690989
Ljung-Box p-value: 0.000000
Se rechaza H0: hay autocorrelación en los residuales.
Jarque-Bera p-value: 0.000000
Se rechaza H0: los residuales no siguen una distribución normal.
# Compute the fit metrics for the training split and attach the residual-test p-values.
metrica_price_LSTM_train28 = metricas(y_train_price28, train_preds_price_LSTM28)
# Relabel the single row (index 0) with a descriptive name.
metrica_price_LSTM_train28 = metrica_price_LSTM_train28.rename(index={0: 'LSTM Entrenamiento Price τ = 28'})
# Scalar assignment broadcasts across the one-row frame — same result as an aligned Series.
metrica_price_LSTM_train28['Ljung-Box p-value'] = ljung_box_pval_LSTM_train28
metrica_price_LSTM_train28['Jarque-Bera p-value'] = jarque_bera_pval_LSTM_train28
metrica_price_LSTM_train28
| SSE | MAPE | MAD | MSD | R2 | Ljung-Box p-value | Jarque-Bera p-value | |
|---|---|---|---|---|---|---|---|
| LSTM Entrenamiento Price τ = 28 | 1.5885e+12 | 66.24% | 10000.76 | 3.2505e+08 | -43.89% | 0.0 | 0.0 |
Conjunto de datos de Prueba:
# Residual diagnostics on the test split (Ljung-Box and Jarque-Bera p-values).
ljung_box_pvalLSTM28, jarque_bera_pvalLSTM28 = evaluate_residuals(data_test_plot_price28, test_preds_price_LSTM28)
Se rechaza H0: hay autocorrelación en los residuales.
No se rechaza H0: los residuales siguen una distribución normal.
# Compute the fit metrics for the test split and attach the residual-test p-values.
metrica_LSTM_test28 = metricas(y_test_price28, test_preds_price_LSTM28)
# Relabel the single row (index 0) with a descriptive name.
metrica_LSTM_test28 = metrica_LSTM_test28.rename(index={0: 'LSTM Prueba Price τ = 28'})
# Scalar assignment broadcasts across the one-row frame — same result as an aligned Series.
metrica_LSTM_test28['Ljung-Box p-value'] = ljung_box_pvalLSTM28
metrica_LSTM_test28['Jarque-Bera p-value'] = jarque_bera_pvalLSTM28
metrica_LSTM_test28
| SSE | MAPE | MAD | MSD | R2 | Ljung-Box p-value | Jarque-Bera p-value | |
|---|---|---|---|---|---|---|---|
| LSTM Prueba Price τ = 28 | 6.3986e+10 | 99.57% | 47643.9 | 2.2852e+09 | -14852.49% | 0.0003 | 0.427 |
Curva Runs vs Error/Score :
# Training curve: validation loss per epoch for the LSTM-28 run.
plot_best_model_validation_loss(history_price_LSTM28)
Boxplot Errores Conjunto de Entrenamiento, Validación y Prueba :
# Box plots of the residuals for the train / validation / test splits.
errores_plots(y_train_price28, train_preds_price_LSTM28, y_val_price28, val_preds_price_LSTM28, y_test_price28, test_preds_price_LSTM28)
De acuerdo a los resultados obtenidos del gráfico de Run vs Error/Score y al compararlo con el gráfico boxplot de los residuales se observa lo siguiente:
Conjunto de Entrenamiento: La media de los residuales se encuentra alejada considerablemente del error/score correspondiente al val_loss de la época del mejor modelo; además muestra un sesgo considerable a la derecha y una alta variabilidad.
Conjunto de Validación: La media de los residuales se encuentra alejada considerablemente del error/score correspondiente al val_loss de la época del mejor modelo.
Conjunto de Prueba: La media de los residuales se encuentra alejada considerablemente del error/score correspondiente al val_loss de la época del mejor modelo.
Lo anterior es coherente con las métricas obtenidas para el modelo implementado.
Retorno Acumulado (A_t): Perceptrones Multicapa (MLP)#
Horizonte de 7 días (\(\tau=7\))#
A continuación, se lleva a cabo la búsqueda de los hiperparámetros que optimizan nuestro modelo utilizando Keras y GridSearch.
# Silence TensorFlow's INFO and WARNING log output during training.
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # This suppresses all info and warning logs.
def create_mlp_model(activation='tanh',learning_rate=0.001):
input_layer = Input(shape=(7,), dtype='float32') # Capa de entrada
dense1 = Dense(32, activation=activation)(input_layer)
dense2 = Dense(16, activation=activation)(dense1)
dense3 = Dense(16, activation=activation)(dense2)
dropout_layer = Dropout(0.2)(dense3) # Capa Dropout para regularización
output_layer = Dense(1, activation='linear')(dropout_layer) # Capa de salida
model = Model(inputs=input_layer, outputs=output_layer) # Crear el modelo
model.compile(loss='mean_absolute_error', optimizer=tf.keras.optimizers.legacy.Adam(learning_rate = learning_rate)) # Compilar el modelo
return model
# Grid Search configuration: wrap the Keras builder so sklearn can cross-validate it.
model = KerasRegressor(build_fn=create_mlp_model, epochs=20, batch_size=16, verbose=0)
param_grid = {
'activation': ['sigmoid'], # Activation functions to try; full set was ['relu', 'tanh', 'sigmoid']
'epochs' : [100], # Epoch counts to try; full set was [20, 50, 100, 200, 300]
'learning_rate' : [0.001] # Learning rates to try; full set was [0.001, 0.01, 0.1, 0.2, 0.3]
}
grid = GridSearchCV(estimator=model, param_grid=param_grid, n_jobs=-1, cv=KFold(n_splits=5, shuffle=True), verbose =2)
# Fit the model via Grid Search on the τ=7 cumulative-return training set.
grid_result = grid.fit(X_train_at7, y_train_at7)
# Report the best hyperparameters and cross-validated score found.
print(f"Mejor función de activación: {grid_result.best_params_['activation']}")
print(f"Mejor número de epocas: {grid_result.best_params_['epochs']}")
print(f"Mejor Longitud de paso μ (Learning Rate): {grid_result.best_params_['learning_rate']}")
print(f"Mejor Puntuación: {grid_result.best_score_}")
Fitting 5 folds for each of 1 candidates, totalling 5 fits
[CV] END activation=sigmoid, epochs=100, learning_rate=0.001; total time= 14.3s
[CV] END activation=sigmoid, epochs=100, learning_rate=0.001; total time= 14.4s
[CV] END activation=sigmoid, epochs=100, learning_rate=0.001; total time= 14.5s
[CV] END activation=sigmoid, epochs=100, learning_rate=0.001; total time= 14.6s
[CV] END activation=sigmoid, epochs=100, learning_rate=0.001; total time= 21.1s
Mejor función de activación: sigmoid
Mejor número de epocas: 100
Mejor Longitud de paso μ (Learning Rate): 0.001
Mejor Puntuación: -0.5492504566907883
La indexación de parámetros de la función build_models se realiza con base en lo indicado en el numeral 3 del parcial práctico.
# Build the grid of candidate MLP architectures for the τ=7 cumulative-return target.
input_shape7 = 7  # input window length (τ = 7)
neurons_list = [10, 100, 1000, 10000]  # hidden-layer sizes to instantiate
dropout_rates = [0.2, 0.4, 0.6, 0.8]  # dropout rates to instantiate
models_MLP7_at = build_models_mlp(input_shape7, neurons_list, dropout_rates, 'sigmoid')
Modelo con 10 neuronas y dropout 0.2:
Model: "model_137"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_138 (InputLayer) [(None, 7)] 0
dense_344 (Dense) (None, 32) 256
dense_345 (Dense) (None, 16) 528
dense_346 (Dense) (None, 16) 272
dropout_137 (Dropout) (None, 16) 0
dense_347 (Dense) (None, 1) 17
=================================================================
Total params: 1,073
Trainable params: 1,073
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10 neuronas y dropout 0.4:
Model: "model_138"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_139 (InputLayer) [(None, 7)] 0
dense_348 (Dense) (None, 32) 256
dense_349 (Dense) (None, 16) 528
dense_350 (Dense) (None, 16) 272
dropout_138 (Dropout) (None, 16) 0
dense_351 (Dense) (None, 1) 17
=================================================================
Total params: 1,073
Trainable params: 1,073
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10 neuronas y dropout 0.6:
Model: "model_139"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_140 (InputLayer) [(None, 7)] 0
dense_352 (Dense) (None, 32) 256
dense_353 (Dense) (None, 16) 528
dense_354 (Dense) (None, 16) 272
dropout_139 (Dropout) (None, 16) 0
dense_355 (Dense) (None, 1) 17
=================================================================
Total params: 1,073
Trainable params: 1,073
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10 neuronas y dropout 0.8:
Model: "model_140"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_141 (InputLayer) [(None, 7)] 0
dense_356 (Dense) (None, 32) 256
dense_357 (Dense) (None, 16) 528
dense_358 (Dense) (None, 16) 272
dropout_140 (Dropout) (None, 16) 0
dense_359 (Dense) (None, 1) 17
=================================================================
Total params: 1,073
Trainable params: 1,073
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 100 neuronas y dropout 0.2:
Model: "model_141"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_142 (InputLayer) [(None, 7)] 0
dense_360 (Dense) (None, 32) 256
dense_361 (Dense) (None, 16) 528
dense_362 (Dense) (None, 16) 272
dropout_141 (Dropout) (None, 16) 0
dense_363 (Dense) (None, 1) 17
=================================================================
Total params: 1,073
Trainable params: 1,073
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 100 neuronas y dropout 0.4:
Model: "model_142"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_143 (InputLayer) [(None, 7)] 0
dense_364 (Dense) (None, 32) 256
dense_365 (Dense) (None, 16) 528
dense_366 (Dense) (None, 16) 272
dropout_142 (Dropout) (None, 16) 0
dense_367 (Dense) (None, 1) 17
=================================================================
Total params: 1,073
Trainable params: 1,073
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 100 neuronas y dropout 0.6:
Model: "model_143"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_144 (InputLayer) [(None, 7)] 0
dense_368 (Dense) (None, 32) 256
dense_369 (Dense) (None, 16) 528
dense_370 (Dense) (None, 16) 272
dropout_143 (Dropout) (None, 16) 0
dense_371 (Dense) (None, 1) 17
=================================================================
Total params: 1,073
Trainable params: 1,073
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 100 neuronas y dropout 0.8:
Model: "model_144"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_145 (InputLayer) [(None, 7)] 0
dense_372 (Dense) (None, 32) 256
dense_373 (Dense) (None, 16) 528
dense_374 (Dense) (None, 16) 272
dropout_144 (Dropout) (None, 16) 0
dense_375 (Dense) (None, 1) 17
=================================================================
Total params: 1,073
Trainable params: 1,073
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 1000 neuronas y dropout 0.2:
Model: "model_145"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_146 (InputLayer) [(None, 7)] 0
dense_376 (Dense) (None, 32) 256
dense_377 (Dense) (None, 16) 528
dense_378 (Dense) (None, 16) 272
dropout_145 (Dropout) (None, 16) 0
dense_379 (Dense) (None, 1) 17
=================================================================
Total params: 1,073
Trainable params: 1,073
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 1000 neuronas y dropout 0.4:
Model: "model_146"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_147 (InputLayer) [(None, 7)] 0
dense_380 (Dense) (None, 32) 256
dense_381 (Dense) (None, 16) 528
dense_382 (Dense) (None, 16) 272
dropout_146 (Dropout) (None, 16) 0
dense_383 (Dense) (None, 1) 17
=================================================================
Total params: 1,073
Trainable params: 1,073
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 1000 neuronas y dropout 0.6:
Model: "model_147"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_148 (InputLayer) [(None, 7)] 0
dense_384 (Dense) (None, 32) 256
dense_385 (Dense) (None, 16) 528
dense_386 (Dense) (None, 16) 272
dropout_147 (Dropout) (None, 16) 0
dense_387 (Dense) (None, 1) 17
=================================================================
Total params: 1,073
Trainable params: 1,073
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 1000 neuronas y dropout 0.8:
Model: "model_148"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_149 (InputLayer) [(None, 7)] 0
dense_388 (Dense) (None, 32) 256
dense_389 (Dense) (None, 16) 528
dense_390 (Dense) (None, 16) 272
dropout_148 (Dropout) (None, 16) 0
dense_391 (Dense) (None, 1) 17
=================================================================
Total params: 1,073
Trainable params: 1,073
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10000 neuronas y dropout 0.2:
Model: "model_149"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_150 (InputLayer) [(None, 7)] 0
dense_392 (Dense) (None, 32) 256
dense_393 (Dense) (None, 16) 528
dense_394 (Dense) (None, 16) 272
dropout_149 (Dropout) (None, 16) 0
dense_395 (Dense) (None, 1) 17
=================================================================
Total params: 1,073
Trainable params: 1,073
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10000 neuronas y dropout 0.4:
Model: "model_150"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_151 (InputLayer) [(None, 7)] 0
dense_396 (Dense) (None, 32) 256
dense_397 (Dense) (None, 16) 528
dense_398 (Dense) (None, 16) 272
dropout_150 (Dropout) (None, 16) 0
dense_399 (Dense) (None, 1) 17
=================================================================
Total params: 1,073
Trainable params: 1,073
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10000 neuronas y dropout 0.6:
Model: "model_151"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_152 (InputLayer) [(None, 7)] 0
dense_400 (Dense) (None, 32) 256
dense_401 (Dense) (None, 16) 528
dense_402 (Dense) (None, 16) 272
dropout_151 (Dropout) (None, 16) 0
dense_403 (Dense) (None, 1) 17
=================================================================
Total params: 1,073
Trainable params: 1,073
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10000 neuronas y dropout 0.8:
Model: "model_152"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_153 (InputLayer) [(None, 7)] 0
dense_404 (Dense) (None, 32) 256
dense_405 (Dense) (None, 16) 528
dense_406 (Dense) (None, 16) 272
dropout_152 (Dropout) (None, 16) 0
dense_407 (Dense) (None, 1) 17
=================================================================
Total params: 1,073
Trainable params: 1,073
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Para entrenar el modelo, se utiliza la función fit() en el objeto del modelo, proporcionándole como argumentos X_train y y_train. El proceso de entrenamiento se lleva a cabo a lo largo de un número específico de épocas. Además, el parámetro batch_size especifica cuántas muestras del conjunto de entrenamiento se usarán en cada iteración de retropropagación (backpropagation).
El conjunto de datos de validación se utiliza para evaluar el rendimiento del modelo al finalizar cada época. Se emplea un objeto ModelCheckpoint que monitorea la función de pérdida en el conjunto de validación y almacena el modelo correspondiente a la época en la que esta función alcanza su valor más bajo.
Se define val_loss como el valor de la función de coste para los datos de validación cruzada y loss como el valor de la función de coste para los datos de entrenamiento. period=1 indica que el modelo se evaluará y potencialmente se guardará después de cada época.
from tensorflow.keras.callbacks import ModelCheckpoint
# Checkpoint path template: the epoch number and validation loss are encoded in the filename.
save_weights = os.path.join('keras_models', 'PRSA_data_at_MLP7_weights.{epoch:02d}-{val_loss:.4f}.keras')
# Keep only the epoch with the lowest validation loss, saving the full model each epoch it improves.
save_best7_at = ModelCheckpoint(save_weights, monitor='val_loss', verbose=2,
save_best_only=True, save_weights_only=False, mode='min', save_freq='epoch');
A continuación se itera sobre cada uno de los modelos del objeto models_MLP7.
import os
from joblib import dump, load

history_at_MPL7 = []
# Train each candidate model once, caching its training history on disk so
# re-running the notebook skips completed fits.
for i, model in enumerate(models_MLP7_at):
    filename = f'history_at_MPL7_model_{i}.joblib'
    if os.path.exists(filename):
        # A cached history exists: load it instead of retraining.
        model_history = load(filename)
        # BUG FIX: the message printed the literal '(unknown)' instead of the
        # filename; interpolate {filename} as the captured output shows.
        print(f"El archivo '{filename}' ya existe. Se ha cargado el historial del entrenamiento.")
    else:
        model_history = model.fit(x=X_train_at7, y=y_train_at7, batch_size=16, epochs=100,
                                  verbose=2, callbacks=[save_best7_at], validation_data=(X_val_at7, y_val_at7),
                                  shuffle=True)
        dump(model_history.history, filename)
        print(f"El entrenamiento del modelo {i + 1} se ha completado y el historial ha sido guardado en '{filename}'.")
    # Normalize to a plain dict: `load` returns the history dict directly,
    # while `fit` returns a History object carrying it in `.history`.
    history_at_MPL7.append(model_history if isinstance(model_history, dict) else model_history.history)
El archivo 'history_at_MPL7_model_0.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_at_MPL7_model_1.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_at_MPL7_model_2.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_at_MPL7_model_3.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_at_MPL7_model_4.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_at_MPL7_model_5.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_at_MPL7_model_6.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_at_MPL7_model_7.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_at_MPL7_model_8.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_at_MPL7_model_9.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_at_MPL7_model_10.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_at_MPL7_model_11.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_at_MPL7_model_12.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_at_MPL7_model_13.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_at_MPL7_model_14.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_at_MPL7_model_15.joblib' ya existe. Se ha cargado el historial del entrenamiento.
En este caso, el parámetro verbose=2 indica que se mostrará una línea por cada época durante el entrenamiento. Los valores posibles son: 0 para no mostrar información, 1 para visualizar una barra de progreso y 2 para ver un registro por época.
Además, las muestras se mezclan de manera aleatoria antes de cada época, gracias a la opción shuffle=True.
Una vez completado el entrenamiento, se realizan predicciones sobre el Precio del Bitcoin utilizando los mejores modelos guardados. Las predicciones generadas, que corresponden al Precio del Bitcoin escalado, son transformadas inversamente para recuperar los valores originales. Asimismo, se evalúa la calidad del ajuste mediante el uso de métricas de medición como SSE, MAPE, MAD, MSD y R2.
import os
import re
from tensorflow.keras.models import load_model
import numpy as np

# Directory holding the checkpointed models.
model_dir = 'keras_models'
files = os.listdir(model_dir)
# Checkpoint filename pattern: PRSA_data_at_MLP7_weights.<epoch>-<val_loss>.keras
pattern = r"PRSA_data_at_MLP7_weights\.(\d+)-([\d\.]+)\.keras"
best_val_loss = float('inf')
best_model_file = None
best_model7_at = None
# Scan the directory and keep the checkpoint with the lowest encoded val_loss.
# (The epoch number in group 1 is not needed for the selection, only the loss.)
for file in files:
    match = re.match(pattern, file)
    if match:
        val_loss = float(match.group(2))  # validation loss parsed from the filename
        if val_loss < best_val_loss:
            best_val_loss = val_loss
            best_model_file = file  # remember the best checkpoint so far
# Load the best model, if any file matched the pattern.
if best_model_file:
    best_model_path = os.path.join(model_dir, best_model_file)
    print(f"Cargando el mejor modelo: {best_model_file} con val_loss: {best_val_loss}")
    best_model7_at = load_model(best_model_path)  # load the full saved model
    if best_model7_at is None:
        print("Error: No se pudo cargar el mejor modelo.")
else:
    print("No se encontraron archivos de modelos que coincidan con el patrón.")
Cargando el mejor modelo: PRSA_data_at_MLP7_weights.80-0.0780.keras con val_loss: 0.078
A continuación estimamos las predicciones del modelo MLP para la época en donde la función de pérdida alcanza su valor más bajo.
# List every checkpoint file found in the models directory (diagnostic output).
print("Archivos en el directorio 'keras_models':", files)
Archivos en el directorio 'keras_models': ['PRSA_data_vola14_MLP7_weights.04-0.0018.keras', 'PRSA_data_price_MLP21_weights.01-3579.4128.keras', 'PRSA_data_price_LSTM21_weights.01-43062.3047.keras', 'PRSA_data_price_LSTM28_weights.02-42892.2305.keras', 'PRSA_data_price_LSTM_weights.19-67016.3047.keras', 'PRSA_data_vola14_MLP21_weights.20-0.0011.keras', 'PRSA_data_price_LSTM14_weights.06-51394.8750.keras', 'PRSA_data_vola21_LSTM7_weights.04-0.0012.keras', 'PRSA_data_vola7_MLP28_weights.07-0.0033.keras', 'PRSA_data_price_LSTM21_weights.02-43052.0469.keras', 'PRSA_data_at_MLP7_weights.80-0.0780.keras', 'PRSA_data_vola21_LSTM28_weights.02-0.0022.keras', 'PRSA_data_vola21_LSTM14_weights.04-0.0011.keras', 'PRSA_data_vola7_LSTM21_weights.18-0.0029.keras', 'PRSA_data_vola21_LSTM7_weights.08-0.0013.keras', 'PRSA_data_vola28_MLP14_weights.04-0.0013.keras', 'PRSA_data_price_MLP14_weights.52-626.2623.keras', 'PRSA_data_vola7_MLP7_weights.15-0.0041.keras', 'PRSA_data_price_LSTM14_weights.13-51325.8516.keras', 'PRSA_data_vola14_MLP7_weights.02-0.0023.keras', 'PRSA_data_price_LSTM21_weights.11-42962.6562.keras', 'PRSA_data_vola7_LSTM28_weights.10-0.0037.keras', 'PRSA_data_at_LSTM_weights.04-1.2071.keras', 'PRSA_data_price_MLP7_weights.50-1342.5273.keras', 'PRSA_data_price_MLP14_weights.03-5108.7837.keras', 'PRSA_data_vola28_MLP7_weights.14-0.0012.keras', 'PRSA_data_at_MLP7_weights.11-2.4883.keras', 'PRSA_data_price_LSTM28_weights.08-42832.5430.keras', 'PRSA_data_price_MLP28_weights.01-9242.6553.keras', 'PRSA_data_vola21_LSTM28_weights.04-0.0019.keras', 'PRSA_data_price_MLP21_weights.16-1013.2251.keras', 'PRSA_data_price_MLP28_weights.13-1042.5178.keras', 'PRSA_data_vola28_MLP28_weights.34-0.0006.keras', 'PRSA_data_at_MLP7_weights.01-19.6635.keras', 'PRSA_data_at_MLP7_weights.19-1.8379.keras', 'PRSA_data_at_MLP7_weights.12-2.3991.keras', 'PRSA_data_price_MLP7_weights.15-1346.3275.keras', 'PRSA_data_vola14_MLP28_weights.02-0.0024.keras', 
'PRSA_data_vola14_MLP28_weights.07-0.0017.keras', 'PRSA_data_at_MLP28_weights.05-0.0930.keras', 'PRSA_data_price_LSTM14_weights.20-51256.7266.keras', 'PRSA_data_vola21_LSTM21_weights.05-0.0013.keras', 'PRSA_data_price_MLP14_weights.12-1304.7009.keras', 'PRSA_data_vola7_LSTM14_weights.09-0.0044.keras', 'PRSA_data_vola28_MLP28_weights.50-0.0006.keras', 'PRSA_data_vola7_MLP14_weights.15-0.0020.keras', 'PRSA_data_price_MLP14_weights.08-1386.5365.keras', 'PRSA_data_price_LSTM21_weights.09-42982.2383.keras', 'PRSA_data_price_MLP28_weights.11-7366.8994.keras', 'PRSA_data_vola14_MLP7_weights.18-0.0015.keras', 'PRSA_data_price_MLP7_weights.14-1734.7238.keras', 'PRSA_data_price_LSTM14_weights.15-51306.2070.keras', 'PRSA_data_vola21_MLP28_weights.25-0.0010.keras', 'PRSA_data_at_LSTM14_weights.02-2.2175.keras', 'PRSA_data_vola28_LSTM7_weights.08-0.0014.keras', 'PRSA_data_price_MLP28_weights.10-1107.4047.keras', 'PRSA_data_vola28_MLP7_weights.01-0.0068.keras', 'PRSA_data_vola21_LSTM21_weights.03-0.0015.keras', 'PRSA_data_price_MLP7_weights.01-9253.3047.keras', 'PRSA_data_vola21_MLP14_weights.31-0.0007.keras', 'PRSA_data_price_LSTM28_weights.12-42793.6172.keras', 'PRSA_data_price_LSTM14_weights.09-51365.0000.keras', 'PRSA_data_vola28_MLP28_weights.42-0.0006.keras', 'PRSA_data_price_MLP7_weights.05-4792.5054.keras', 'PRSA_data_vola21_LSTM_weights.01-0.0024.keras', 'PRSA_data_price_LSTM28_weights.17-42745.0352.keras', 'PRSA_data_vola28_MLP28_weights.45-0.0006.keras', 'PRSA_data_vola28_MLP14_weights.08-0.0010.keras', 'PRSA_data_at_LSTM_weights.11-0.6498.keras', 'PRSA_data_vola28_MLP28_weights.39-0.0007.keras', 'PRSA_data_at_MLP21_weights.11-0.0326.keras', 'PRSA_data_price_LSTM_weights.20-67006.4141.keras', 'PRSA_data_vola28_MLP7_weights.06-0.0014.keras', 'PRSA_data_at_LSTM14_weights.06-0.8145.keras', 'PRSA_data_at_MLP28_weights.01-2.1902.keras', 'PRSA_data_price_MLP14_weights.46-814.1992.keras', '.DS_Store', 'PRSA_data_vola28_LSTM28_weights.12-0.0006.keras', 
'PRSA_data_price_LSTM14_weights.18-51276.5625.keras', 'PRSA_data_vola21_LSTM14_weights.01-0.0078.keras', 'PRSA_data_vola14_MLP21_weights.04-0.0015.keras', 'PRSA_data_vola21_LSTM7_weights.02-0.0012.keras', 'PRSA_data_vola21_LSTM14_weights.02-0.0011.keras', 'PRSA_data_at_MLP7_weights.09-2.9793.keras', 'PRSA_data_vola21_MLP14_weights.06-0.0007.keras', 'PRSA_data_vola14_MLP21_weights.78-0.0010.keras', 'PRSA_data_price_MLP14_weights.64-711.9107.keras', 'PRSA_data_vola14_MLP28_weights.31-0.0011.keras', 'PRSA_data_vola14_LSTM_weights.08-0.0020.keras', 'PRSA_data_vola14_LSTM21_weights.02-0.0018.keras', 'PRSA_data_vola14_LSTM28_weights.17-0.0014.keras', 'PRSA_data_vola14_MLP14_weights.29-0.0005.keras', 'PRSA_data_price_LSTM_weights.03-67175.7734.keras', 'PRSA_data_vola14_LSTM_weights.01-0.0068.keras', 'PRSA_data_price_LSTM28_weights.11-42803.3086.keras', 'PRSA_data_price_MLP28_weights.15-7107.3306.keras', 'PRSA_data_vola14_MLP21_weights.49-0.0010.keras', 'PRSA_data_vola28_MLP7_weights.07-0.0010.keras', 'PRSA_data_vola21_LSTM7_weights.02-0.0010.keras', 'PRSA_data_at_LSTM14_weights.15-0.4082.keras', 'PRSA_data_vola28_MLP7_weights.19-0.0011.keras', 'PRSA_data_vola14_MLP14_weights.06-0.0006.keras', 'PRSA_data_vola14_LSTM21_weights.06-0.0012.keras', 'PRSA_data_price_LSTM_weights.17-67036.2891.keras', 'PRSA_data_vola28_MLP7_weights.03-0.0018.keras', 'PRSA_data_vola7_MLP28_weights.02-0.0039.keras', 'PRSA_data_vola7_LSTM28_weights.16-0.0035.keras', 'PRSA_data_vola21_MLP7_weights.20-0.0011.keras', 'PRSA_data_vola21_LSTM7_weights.01-0.0018.keras', 'PRSA_data_vola21_MLP21_weights.33-0.0009.keras', 'PRSA_data_price_MLP21_weights.64-870.7020.keras', 'PRSA_data_vola21_MLP14_weights.22-0.0007.keras', 'PRSA_data_vola7_MLP14_weights.14-0.0020.keras', 'PRSA_data_price_LSTM21_weights.17-42903.9648.keras', 'PRSA_data_vola21_MLP14_weights.01-0.0021.keras', 'PRSA_data_vola21_LSTM7_weights.16-0.0011.keras', 'PRSA_data_at_LSTM14_weights.11-0.6656.keras', 
'PRSA_data_vola28_MLP7_weights.02-0.0019.keras', 'PRSA_data_vola14_MLP28_weights.05-0.0018.keras', 'PRSA_data_vola14_LSTM14_weights.08-0.0005.keras', 'PRSA_data_at_MLP21_weights.02-0.8171.keras', 'PRSA_data_price_MLP28_weights.02-8134.2202.keras', 'PRSA_data_price_LSTM28_weights.06-42852.3320.keras', 'PRSA_data_vola7_MLP14_weights.02-0.0036.keras', 'PRSA_data_vola14_LSTM28_weights.01-0.0026.keras', 'PRSA_data_at_MLP7_weights.04-9.8837.keras', 'PRSA_data_price_MLP28_weights.16-1274.4368.keras', 'PRSA_data_price_LSTM21_weights.07-43002.0078.keras', 'PRSA_data_vola7_MLP28_weights.23-0.0032.keras', 'PRSA_data_price_LSTM_weights.01-67196.3359.keras', 'PRSA_data_at_MLP28_weights.02-1.9156.keras', 'PRSA_data_vola14_MLP21_weights.02-0.0016.keras', 'PRSA_data_vola28_MLP14_weights.14-0.0007.keras', 'PRSA_data_price_LSTM28_weights.07-42842.4375.keras', 'PRSA_data_price_MLP14_weights.37-825.1805.keras', 'PRSA_data_vola7_MLP14_weights.07-0.0021.keras', 'PRSA_data_price_LSTM14_weights.04-51414.8945.keras', 'PRSA_data_price_MLP7_weights.42-1446.1339.keras', 'PRSA_data_price_MLP21_weights.57-932.6497.keras', 'PRSA_data_price_LSTM14_weights.19-51266.6133.keras', 'PRSA_data_vola21_MLP14_weights.34-0.0008.keras', 'PRSA_data_price_MLP14_weights.01-10485.8262.keras', 'PRSA_data_vola21_LSTM7_weights.08-0.0012.keras', 'PRSA_data_at_LSTM_weights.02-2.4966.keras', 'PRSA_data_vola21_LSTM28_weights.15-0.0009.keras', 'PRSA_data_price_LSTM_weights.20-67006.3047.keras', 'PRSA_data_price_LSTM_weights.14-67066.1016.keras', 'PRSA_data_price_LSTM21_weights.19-42884.2422.keras', 'PRSA_data_vola28_LSTM21_weights.18-0.0008.keras', 'PRSA_data_vola21_LSTM7_weights.04-0.0013.keras', 'PRSA_data_price_LSTM28_weights.20-42714.5508.keras', 'PRSA_data_at_LSTM14_weights.13-0.5609.keras', 'PRSA_data_vola28_LSTM28_weights.04-0.0009.keras', 'PRSA_data_vola14_MLP21_weights.01-0.0118.keras', 'PRSA_data_vola7_MLP21_weights.02-0.0032.keras', 'PRSA_data_at_MLP7_weights.74-0.2838.keras', 
'PRSA_data_price_MLP28_weights.17-1217.0461.keras', 'PRSA_data_at_MLP21_weights.16-1.1973.keras', 'PRSA_data_vola7_MLP7_weights.02-0.0036.keras', 'PRSA_data_vola21_LSTM7_weights.02-0.0015.keras', 'PRSA_data_vola14_LSTM14_weights.04-0.0006.keras', 'PRSA_data_vola28_LSTM28_weights.18-0.0006.keras', 'PRSA_data_price_MLP21_weights.74-805.9489.keras', 'PRSA_data_vola21_MLP14_weights.02-0.0011.keras', 'PRSA_data_vola28_MLP21_weights.02-0.0012.keras', 'PRSA_data_vola21_LSTM_weights.11-0.0014.keras', 'PRSA_data_vola7_LSTM14_weights.14-0.0036.keras', 'PRSA_data_at_MLP28_weights.03-1.7712.keras', 'PRSA_data_price_LSTM21_weights.20-42874.3555.keras', 'PRSA_data_vola28_LSTM7_weights.20-0.0010.keras', 'PRSA_data_vola28_MLP21_weights.04-0.0009.keras', 'PRSA_data_at_MLP7_weights.08-3.6716.keras', 'PRSA_data_at_MLP21_weights.04-0.0734.keras', 'PRSA_data_vola21_LSTM28_weights.03-0.0021.keras', 'PRSA_data_vola21_MLP14_weights.35-0.0008.keras', 'PRSA_data_vola28_LSTM7_weights.11-0.0012.keras', 'PRSA_data_price_LSTM28_weights.04-42872.1953.keras', 'PRSA_data_at_LSTM_weights.08-0.4803.keras', 'PRSA_data_vola28_MLP14_weights.05-0.0010.keras', 'PRSA_data_price_LSTM14_weights.14-51316.0625.keras', 'PRSA_data_vola7_MLP14_weights.01-0.0101.keras', 'PRSA_data_vola14_LSTM28_weights.17-0.0015.keras', 'PRSA_data_vola7_LSTM_weights.01-0.0040.keras', 'PRSA_data_vola14_MLP21_weights.09-0.0011.keras', 'PRSA_data_price_LSTM21_weights.05-43021.9570.keras', 'PRSA_data_vola28_MLP28_weights.03-0.0007.keras', 'PRSA_data_vola7_LSTM14_weights.10-0.0042.keras', 'PRSA_data_price_LSTM_weights.07-67135.3359.keras', 'PRSA_data_vola7_MLP14_weights.18-0.0020.keras', 'PRSA_data_price_LSTM21_weights.18-42894.1055.keras', 'PRSA_data_vola7_MLP7_weights.01-0.0042.keras', 'PRSA_data_vola14_LSTM_weights.05-0.0024.keras', 'PRSA_data_vola21_MLP14_weights.27-0.0008.keras', 'PRSA_data_vola28_MLP28_weights.02-0.0018.keras', 'PRSA_data_vola21_LSTM7_weights.03-0.0017.keras', 'PRSA_data_price_LSTM_weights.10-67105.4297.keras', 
'PRSA_data_vola28_MLP7_weights.19-0.0010.keras', 'PRSA_data_vola7_MLP21_weights.03-0.0030.keras', 'PRSA_data_at_MLP7_weights.06-5.3724.keras', 'PRSA_data_at_LSTM_weights.05-1.1226.keras', 'PRSA_data_vola21_LSTM7_weights.01-0.0026.keras', 'PRSA_data_vola21_LSTM28_weights.05-0.0018.keras', 'PRSA_data_vola14_MLP21_weights.05-0.0012.keras', 'PRSA_data_vola21_LSTM28_weights.14-0.0010.keras', 'PRSA_data_vola21_MLP14_weights.08-0.0010.keras', 'PRSA_data_vola21_MLP28_weights.47-0.0008.keras', 'PRSA_data_price_LSTM14_weights.16-51296.3633.keras', 'PRSA_data_at_MLP7_weights.02-16.3421.keras', 'PRSA_data_vola7_LSTM21_weights.15-0.0030.keras', 'PRSA_data_vola21_MLP21_weights.09-0.0009.keras', 'PRSA_data_at_LSTM14_weights.01-6.2168.keras', 'PRSA_data_vola28_LSTM21_weights.01-0.0016.keras', 'PRSA_data_vola21_LSTM21_weights.16-0.0009.keras', 'PRSA_data_vola21_LSTM28_weights.07-0.0016.keras', 'PRSA_data_vola21_MLP28_weights.19-0.0010.keras', 'PRSA_data_vola21_LSTM28_weights.14-0.0009.keras', 'PRSA_data_vola14_MLP7_weights.09-0.0018.keras', 'PRSA_data_vola21_MLP21_weights.02-0.0011.keras', 'PRSA_data_vola28_LSTM14_weights.03-0.0007.keras', 'PRSA_data_vola21_LSTM_weights.09-0.0015.keras', 'PRSA_data_vola21_LSTM7_weights.09-0.0012.keras', 'PRSA_data_vola28_MLP21_weights.65-0.0007.keras', 'PRSA_data_vola21_MLP28_weights.15-0.0011.keras', 'PRSA_data_at_MLP7_weights.42-0.8694.keras', 'PRSA_data_vola21_LSTM28_weights.18-0.0008.keras', 'PRSA_data_vola21_MLP14_weights.04-0.0008.keras', 'PRSA_data_vola7_LSTM14_weights.02-0.0040.keras', 'PRSA_data_vola21_LSTM14_weights.12-0.0007.keras', 'PRSA_data_vola7_LSTM21_weights.15-0.0029.keras', 'PRSA_data_vola21_MLP28_weights.01-0.0026.keras', 'PRSA_data_price_LSTM_weights.13-67075.9453.keras', 'PRSA_data_vola14_MLP7_weights.01-0.0041.keras', 'PRSA_data_price_MLP21_weights.02-1476.1034.keras', 'PRSA_data_vola14_LSTM28_weights.19-0.0014.keras', 'PRSA_data_price_MLP14_weights.96-663.0834.keras', 'PRSA_data_vola7_MLP21_weights.94-0.0024.keras', 
'PRSA_data_price_LSTM21_weights.03-43041.9570.keras', 'PRSA_data_price_MLP7_weights.12-3798.9531.keras', 'PRSA_data_price_LSTM28_weights.10-42813.0352.keras', 'PRSA_data_vola14_MLP28_weights.20-0.0014.keras', 'PRSA_data_vola7_MLP21_weights.04-0.0027.keras', 'PRSA_data_vola7_LSTM28_weights.18-0.0035.keras', 'PRSA_data_at_MLP7_weights.10-2.7632.keras', 'PRSA_data_price_LSTM_weights.06-67145.4141.keras', 'PRSA_data_price_LSTM28_weights.13-42783.9805.keras', 'PRSA_data_vola21_MLP7_weights.13-0.0012.keras', 'PRSA_data_price_LSTM28_weights.01-42902.4570.keras', 'PRSA_data_price_LSTM_weights.20-67005.9609.keras', 'PRSA_data_vola7_LSTM21_weights.04-0.0034.keras', 'PRSA_data_at_MLP14_weights.03-0.0461.keras', 'PRSA_data_vola28_MLP28_weights.01-0.0042.keras', 'PRSA_data_vola7_LSTM14_weights.01-0.0455.keras', 'PRSA_data_price_MLP28_weights.17-1182.9105.keras', 'PRSA_data_vola7_MLP14_weights.05-0.0018.keras', 'PRSA_data_vola28_MLP14_weights.38-0.0006.keras', 'PRSA_data_vola28_MLP7_weights.10-0.0013.keras', 'PRSA_data_vola7_LSTM21_weights.01-0.0038.keras', 'PRSA_data_price_LSTM14_weights.17-51286.4570.keras', 'PRSA_data_price_LSTM28_weights.16-42754.8008.keras', 'PRSA_data_price_LSTM14_weights.11-51345.4023.keras', 'PRSA_data_price_MLP14_weights.22-638.4409.keras', 'PRSA_data_vola7_MLP28_weights.32-0.0029.keras', 'PRSA_data_price_MLP7_weights.48-1648.1864.keras', 'PRSA_data_vola28_LSTM28_weights.10-0.0007.keras', 'PRSA_data_vola21_LSTM_weights.03-0.0021.keras', 'PRSA_data_vola21_LSTM21_weights.13-0.0010.keras', 'PRSA_data_at_MLP7_weights.07-4.2627.keras', 'PRSA_data_vola7_LSTM21_weights.17-0.0029.keras', 'PRSA_data_vola21_LSTM28_weights.08-0.0011.keras', 'PRSA_data_price_MLP21_weights.05-1072.5081.keras', 'PRSA_data_vola7_LSTM21_weights.05-0.0030.keras', 'PRSA_data_price_MLP14_weights.29-856.8320.keras', 'PRSA_data_vola14_LSTM21_weights.11-0.0011.keras', 'PRSA_data_price_LSTM21_weights.10-42972.4219.keras', 'PRSA_data_price_LSTM_weights.02-67185.9453.keras', 
'PRSA_data_vola28_MLP14_weights.21-0.0007.keras', 'PRSA_data_vola21_LSTM28_weights.16-0.0009.keras', 'PRSA_data_vola28_LSTM21_weights.10-0.0009.keras', 'PRSA_data_price_LSTM28_weights.15-42764.5898.keras', 'PRSA_data_at_LSTM14_weights.07-0.7059.keras', 'PRSA_data_vola21_LSTM21_weights.01-0.0048.keras', 'PRSA_data_at_LSTM14_weights.03-1.3422.keras', 'PRSA_data_vola14_LSTM14_weights.02-0.0015.keras', 'PRSA_data_vola14_MLP14_weights.02-0.0007.keras', 'PRSA_data_vola7_LSTM21_weights.02-0.0034.keras', 'PRSA_data_vola7_MLP21_weights.21-0.0025.keras', 'PRSA_data_at_MLP7_weights.18-1.8396.keras', 'PRSA_data_price_LSTM_weights.11-67095.6172.keras', 'PRSA_data_at_LSTM14_weights.12-0.6378.keras', 'PRSA_data_vola21_LSTM21_weights.07-0.0012.keras', 'PRSA_data_vola7_MLP28_weights.45-0.0030.keras', 'PRSA_data_at_MLP7_weights.26-0.8897.keras', 'PRSA_data_vola21_MLP7_weights.01-0.0014.keras', 'PRSA_data_price_MLP14_weights.19-982.5739.keras', 'PRSA_data_vola14_LSTM_weights.16-0.0017.keras', 'PRSA_data_price_MLP21_weights.94-813.8954.keras', 'PRSA_data_vola7_LSTM21_weights.20-0.0029.keras', 'PRSA_data_at_LSTM_weights.03-1.4842.keras', 'PRSA_data_vola21_MLP28_weights.02-0.0015.keras', 'PRSA_data_price_LSTM21_weights.12-42952.9453.keras', 'PRSA_data_price_MLP28_weights.17-1019.2816.keras', 'PRSA_data_vola14_LSTM_weights.13-0.0019.keras', 'PRSA_data_vola7_LSTM_weights.02-0.0038.keras', 'PRSA_data_vola7_LSTM28_weights.02-0.0042.keras', 'PRSA_data_at_LSTM14_weights.13-0.2270.keras', 'PRSA_data_price_LSTM_weights.18-67026.2891.keras', 'PRSA_data_vola28_MLP21_weights.66-0.0007.keras', 'PRSA_data_at_MLP14_weights.42-0.0373.keras', 'PRSA_data_vola7_MLP14_weights.38-0.0020.keras', 'PRSA_data_vola7_LSTM21_weights.11-0.0029.keras', 'PRSA_data_vola21_LSTM21_weights.12-0.0010.keras', 'PRSA_data_at_LSTM_weights.14-0.5158.keras', 'PRSA_data_vola14_LSTM21_weights.01-0.0019.keras', 'PRSA_data_vola28_LSTM21_weights.11-0.0009.keras', 'PRSA_data_vola7_LSTM28_weights.05-0.0038.keras', 
'PRSA_data_vola14_MLP14_weights.10-0.0005.keras', 'PRSA_data_at_MLP21_weights.03-2.1540.keras', 'PRSA_data_at_LSTM_weights.16-0.5377.keras', 'PRSA_data_vola21_LSTM7_weights.13-0.0013.keras', 'PRSA_data_vola21_MLP28_weights.44-0.0008.keras', 'PRSA_data_at_MLP7_weights.16-1.9941.keras', 'PRSA_data_vola28_LSTM28_weights.01-0.0010.keras', 'PRSA_data_vola28_LSTM14_weights.19-0.0006.keras', 'PRSA_data_price_LSTM21_weights.14-42933.4180.keras', 'PRSA_data_vola14_LSTM28_weights.04-0.0019.keras', 'PRSA_data_price_MLP21_weights.82-828.7976.keras', 'PRSA_data_vola28_MLP21_weights.01-0.0030.keras', 'PRSA_data_price_LSTM14_weights.10-51355.2070.keras', 'PRSA_data_vola21_MLP21_weights.18-0.0010.keras', 'PRSA_data_vola28_MLP21_weights.73-0.0007.keras', 'PRSA_data_vola28_MLP7_weights.20-0.0011.keras', 'PRSA_data_price_LSTM21_weights.13-42943.1719.keras', 'PRSA_data_vola28_MLP28_weights.36-0.0007.keras', 'PRSA_data_vola7_MLP14_weights.04-0.0025.keras', 'PRSA_data_at_MLP14_weights.02-0.2715.keras', 'PRSA_data_price_LSTM14_weights.05-51404.8945.keras', 'PRSA_data_price_LSTM28_weights.19-42725.4297.keras', 'PRSA_data_price_MLP28_weights.01-1561.6656.keras', 'PRSA_data_vola21_MLP7_weights.14-0.0011.keras', 'PRSA_data_vola7_MLP7_weights.23-0.0033.keras', 'PRSA_data_vola14_MLP28_weights.01-0.0026.keras', 'PRSA_data_at_MLP14_weights.29-0.0323.keras', 'PRSA_data_vola7_MLP21_weights.01-0.0033.keras', 'PRSA_data_vola21_LSTM_weights.19-0.0014.keras', 'PRSA_data_price_MLP7_weights.03-5953.8076.keras', 'PRSA_data_price_MLP21_weights.42-948.6628.keras', 'PRSA_data_vola21_LSTM14_weights.07-0.0008.keras', 'PRSA_data_vola14_LSTM28_weights.05-0.0018.keras', 'PRSA_data_vola7_LSTM28_weights.13-0.0037.keras', 'PRSA_data_vola21_MLP21_weights.01-0.0016.keras', 'PRSA_data_vola14_LSTM21_weights.08-0.0011.keras', 'PRSA_data_price_LSTM21_weights.06-43011.9570.keras', 'PRSA_data_vola7_MLP14_weights.05-0.0019.keras', 'PRSA_data_vola7_MLP28_weights.16-0.0033.keras', 
'PRSA_data_vola21_LSTM28_weights.16-0.0008.keras', 'PRSA_data_vola28_MLP14_weights.01-0.0031.keras', 'PRSA_data_vola14_MLP28_weights.11-0.0015.keras', 'PRSA_data_price_LSTM14_weights.03-51425.0000.keras', 'PRSA_data_vola7_LSTM28_weights.04-0.0039.keras', 'PRSA_data_vola28_LSTM7_weights.13-0.0011.keras', 'PRSA_data_price_LSTM14_weights.01-51445.4883.keras', 'PRSA_data_vola28_MLP28_weights.22-0.0006.keras', 'PRSA_data_price_LSTM_weights.09-67115.3359.keras', 'PRSA_data_vola28_LSTM7_weights.14-0.0011.keras', 'PRSA_data_vola7_LSTM14_weights.04-0.0050.keras', 'PRSA_data_vola21_MLP28_weights.17-0.0010.keras', 'PRSA_data_vola28_LSTM21_weights.09-0.0010.keras', 'PRSA_data_vola21_MLP14_weights.13-0.0010.keras', 'PRSA_data_vola14_MLP14_weights.01-0.0013.keras', 'PRSA_data_vola21_MLP28_weights.50-0.0009.keras', 'PRSA_data_at_LSTM_weights.19-0.2184.keras', 'PRSA_data_vola14_LSTM_weights.02-0.0030.keras', 'PRSA_data_vola7_MLP28_weights.39-0.0030.keras', 'PRSA_data_price_LSTM21_weights.04-43031.9414.keras', 'PRSA_data_price_LSTM_weights.15-67056.2109.keras', 'PRSA_data_at_MLP7_weights.20-1.6814.keras', 'PRSA_data_vola28_LSTM7_weights.18-0.0010.keras', 'PRSA_data_at_MLP7_weights.95-0.2130.keras', 'PRSA_data_vola14_LSTM28_weights.03-0.0021.keras', 'PRSA_data_at_LSTM_weights.01-7.5653.keras', 'PRSA_data_at_MLP7_weights.05-7.2110.keras', 'PRSA_data_vola21_LSTM28_weights.20-0.0008.keras', 'PRSA_data_at_LSTM_weights.07-0.8569.keras', 'PRSA_data_vola14_LSTM28_weights.12-0.0014.keras', 'PRSA_data_price_LSTM21_weights.16-42913.7969.keras', 'PRSA_data_at_MLP28_weights.04-1.3772.keras', 'PRSA_data_price_LSTM_weights.08-67125.3047.keras', 'PRSA_data_price_LSTM14_weights.07-51384.8633.keras', 'PRSA_data_price_LSTM_weights.04-67165.6016.keras', 'PRSA_data_vola14_LSTM28_weights.15-0.0014.keras', 'PRSA_data_at_MLP14_weights.01-0.4195.keras', 'PRSA_data_price_LSTM28_weights.09-42822.7578.keras', 'PRSA_data_price_LSTM28_weights.03-42882.1953.keras', 'PRSA_data_at_LSTM14_weights.05-0.8408.keras', 
'PRSA_data_vola7_MLP28_weights.08-0.0030.keras', 'PRSA_data_price_MLP14_weights.07-3004.8474.keras', 'PRSA_data_price_LSTM21_weights.15-42923.6367.keras', 'PRSA_data_vola7_MLP21_weights.81-0.0025.keras', 'PRSA_data_price_MLP21_weights.86-819.7106.keras', 'PRSA_data_vola28_MLP7_weights.10-0.0010.keras', 'PRSA_data_vola21_MLP28_weights.05-0.0012.keras', 'PRSA_data_vola21_MLP14_weights.20-0.0007.keras', 'PRSA_data_vola14_MLP28_weights.42-0.0011.keras', 'PRSA_data_price_LSTM_weights.12-67085.7578.keras', 'PRSA_data_vola28_MLP21_weights.13-0.0008.keras', 'PRSA_data_vola14_MLP28_weights.08-0.0016.keras', 'PRSA_data_at_MLP7_weights.86-0.0883.keras', 'PRSA_data_vola28_LSTM14_weights.01-0.0020.keras', 'PRSA_data_vola21_MLP21_weights.71-0.0010.keras', 'PRSA_data_at_MLP28_weights.09-0.1537.keras', 'PRSA_data_vola7_MLP28_weights.48-0.0030.keras', 'PRSA_data_at_MLP28_weights.12-0.0354.keras', 'PRSA_data_at_LSTM14_weights.04-0.9732.keras', 'PRSA_data_at_LSTM14_weights.15-0.2154.keras', 'PRSA_data_vola14_MLP28_weights.21-0.0013.keras', 'PRSA_data_vola21_MLP14_weights.25-0.0009.keras', 'PRSA_data_vola14_LSTM14_weights.01-0.0027.keras', 'PRSA_data_vola21_LSTM28_weights.01-0.0039.keras', 'PRSA_data_price_MLP14_weights.02-8059.2510.keras', 'PRSA_data_vola21_MLP7_weights.13-0.0013.keras', 'PRSA_data_vola28_MLP7_weights.04-0.0016.keras', 'PRSA_data_vola21_LSTM21_weights.02-0.0043.keras', 'PRSA_data_price_MLP14_weights.78-683.9230.keras', 'PRSA_data_price_MLP21_weights.19-1002.5925.keras', 'PRSA_data_vola28_MLP21_weights.92-0.0007.keras', 'PRSA_data_price_LSTM28_weights.14-42774.2500.keras', 'PRSA_data_vola21_MLP21_weights.06-0.0010.keras', 'PRSA_data_vola14_LSTM14_weights.03-0.0010.keras', 'PRSA_data_price_LSTM_weights.16-67046.2422.keras', 'PRSA_data_vola7_LSTM28_weights.07-0.0037.keras', 'PRSA_data_vola28_LSTM7_weights.01-0.0017.keras', 'PRSA_data_vola28_LSTM14_weights.02-0.0008.keras', 'PRSA_data_vola7_MLP14_weights.05-0.0022.keras', 'PRSA_data_at_MLP21_weights.03-0.2039.keras', 
'PRSA_data_vola14_LSTM28_weights.02-0.0023.keras', 'PRSA_data_at_MLP7_weights.03-13.2212.keras', 'PRSA_data_vola7_LSTM14_weights.04-0.0037.keras', 'PRSA_data_vola14_MLP21_weights.69-0.0010.keras', 'PRSA_data_vola21_LSTM7_weights.18-0.0013.keras', 'PRSA_data_vola7_MLP21_weights.49-0.0025.keras', 'PRSA_data_vola28_MLP28_weights.31-0.0006.keras', 'PRSA_data_price_MLP21_weights.77-841.5739.keras', 'PRSA_data_vola28_MLP14_weights.35-0.0006.keras', 'PRSA_data_vola7_MLP28_weights.26-0.0031.keras', 'PRSA_data_vola7_LSTM14_weights.01-0.0065.keras', 'PRSA_data_price_LSTM14_weights.02-51435.1133.keras', 'PRSA_data_price_LSTM28_weights.20-42714.7695.keras', 'PRSA_data_vola28_LSTM28_weights.16-0.0006.keras', 'PRSA_data_vola14_LSTM28_weights.06-0.0016.keras', 'PRSA_data_vola21_MLP21_weights.13-0.0010.keras', 'PRSA_data_vola28_MLP14_weights.32-0.0006.keras', 'PRSA_data_vola7_MLP28_weights.01-0.0043.keras', 'PRSA_data_vola21_MLP28_weights.08-0.0011.keras', 'PRSA_data_vola21_LSTM28_weights.10-0.0011.keras', 'PRSA_data_vola21_LSTM7_weights.14-0.0012.keras', 'PRSA_data_vola7_LSTM28_weights.01-0.0049.keras', 'PRSA_data_price_LSTM28_weights.20-42715.6250.keras', 'PRSA_data_vola21_LSTM7_weights.13-0.0012.keras', 'PRSA_data_price_LSTM_weights.05-67155.5078.keras', 'PRSA_data_price_MLP14_weights.14-1073.6493.keras', 'PRSA_data_vola14_LSTM_weights.03-0.0029.keras', 'PRSA_data_vola14_MLP7_weights.07-0.0021.keras', 'PRSA_data_at_MLP28_weights.03-0.0637.keras', 'PRSA_data_vola21_MLP28_weights.03-0.0012.keras', 'PRSA_data_at_MLP28_weights.05-0.1823.keras', 'PRSA_data_at_MLP21_weights.01-0.9762.keras', 'PRSA_data_vola7_LSTM28_weights.15-0.0035.keras', 'PRSA_data_price_LSTM21_weights.08-42992.0703.keras', 'PRSA_data_vola21_MLP21_weights.22-0.0010.keras', 'PRSA_data_vola28_LSTM28_weights.20-0.0006.keras', 'PRSA_data_at_MLP21_weights.01-4.3241.keras', 'PRSA_data_at_LSTM_weights.06-0.8671.keras', 'PRSA_data_price_MLP7_weights.37-1654.4709.keras', 'PRSA_data_at_LSTM_weights.20-0.4626.keras', 
'PRSA_data_vola21_MLP28_weights.27-0.0009.keras', 'PRSA_data_at_LSTM_weights.19-0.4729.keras', 'PRSA_data_at_MLP7_weights.24-1.4718.keras', 'PRSA_data_at_MLP7_weights.59-0.4636.keras', 'PRSA_data_vola14_LSTM14_weights.06-0.0005.keras', 'PRSA_data_at_MLP7_weights.14-2.2322.keras', 'PRSA_data_vola7_LSTM14_weights.02-0.0196.keras', 'PRSA_data_at_LSTM14_weights.19-0.3356.keras', 'PRSA_data_price_LSTM28_weights.18-42735.2305.keras', 'PRSA_data_price_LSTM14_weights.12-51335.6523.keras', 'PRSA_data_vola21_LSTM_weights.06-0.0017.keras', 'PRSA_data_price_LSTM28_weights.05-42862.2383.keras', 'PRSA_data_price_MLP7_weights.10-1248.9592.keras', 'PRSA_data_vola21_MLP28_weights.35-0.0009.keras', 'PRSA_data_vola28_LSTM21_weights.03-0.0013.keras', 'PRSA_data_price_MLP14_weights.04-5028.7607.keras', 'PRSA_data_price_LSTM14_weights.08-51374.8945.keras', 'PRSA_data_vola28_MLP14_weights.48-0.0007.keras']
# Predictions with the best MLP checkpoint (7-day accumulated return).
if best_model7_at is None:
    print("No se puede realizar predicciones porque el mejor modelo no se ha cargado.")
else:
    # Predict each split and drop the trailing singleton axis in one step.
    train_preds_at_MLP7 = np.squeeze(best_model7_at.predict(X_train_at7))
    val_preds_at_MLP7 = np.squeeze(best_model7_at.predict(X_val_at7))
    test_preds_at_MLP7 = np.squeeze(best_model7_at.predict(X_test_at7))
    # Show the flattened predictions for each split.
    print("Predicciones de Entrenamiento:", train_preds_at_MLP7)
    print("Predicciones de validación:", val_preds_at_MLP7)
    print("Predicciones de prueba:", test_preds_at_MLP7)
156/156 [==============================] - 0s 199us/step
1/1 [==============================] - 0s 9ms/step
1/1 [==============================] - 0s 9ms/step
Predicciones de Entrenamiento: [ 1.4620519 1.4620519 1.4620519 ... 23.603428 23.603817 23.600698 ]
Predicciones de validación: [23.588745 23.62459 23.612099 23.621899 23.61894 23.619183 23.645409]
Predicciones de prueba: [23.628016 23.651365 23.654556 23.664265 23.65876 23.641209 23.62173 ]
A continuación se indexan los parámetros sobre la función data_plot() para un horizonte de 7 días (\(\tau = 7\)).
# Split the accumulated-return series A_t into train/validation/test segments
# for plotting, using a 7-day horizon (tau = 7).
data_train_plot_at7, data_val_plot_at7, data_test_plot_at7 = data_plot(df_1_st['A_t'], 7)
Datos de entrenamiento:
4998 0.0000
4997 0.0000
4996 0.0000
4995 0.0000
4994 0.0000
...
18 23.6804
17 23.6921
16 23.7118
15 23.7147
14 23.7234
Name: A_t, Length: 4985, dtype: float64
Datos de validación:
13 23.7689
12 23.7602
11 23.7825
10 23.7595
9 23.7326
8 23.6728
7 23.7199
Name: A_t, dtype: float64
Datos de prueba:
6 23.7083
5 23.6263
4 23.7198
3 23.6852
2 23.6589
1 23.6629
0 23.7125
Name: A_t, dtype: float64
# Plot the last 100 training points plus the validation/test data together
# with the MLP predictions for both splits.
plot_model(data_train_plot_at7[-100:], data_val_plot_at7, data_test_plot_at7, val_preds_at_MLP7, test_preds_at_MLP7, "Predicciones usando Perceptrón Multicapa (MLP)")
A continuación realizamos un análisis sobre los residuales y rendimiento de nuestro modelo con relación a nuestro conjunto de Entrenamiento(rendimiento) y test (rendimiento y residuales).
Conjunto de datos de Entrenamiento
A continuación se realiza el análisis de los residuales para el conjunto de entrenamiento.
# Residual diagnostics on the training split: Ljung-Box (autocorrelation)
# and Jarque-Bera (normality) p-values.
ljung_box_pval_MLP_train7_at, jarque_bera_pval_MLP_train7_at = diagnostic_plots(y_train_at7, train_preds_at_MLP7)
Ljung-Box LB Statistic: 4643.302678
Ljung-Box p-value: 0.000000
Se rechaza H0: hay autocorrelación en los residuales.
Jarque-Bera p-value: 0.000000
Se rechaza H0: los residuales no siguen una distribución normal.
# Compute the error metrics (SSE, MAPE, MAD, MSD, R2) on the training split.
metrica_at_MLP_train = metricas(y_train_at7,train_preds_at_MLP7)
# Relabel row 0 with a descriptive name for the summary table.
metrica_at_MLP_train.index = metrica_at_MLP_train.index.map({0: 'MLP Entrenamiento Retorno Acumulado τ = 7'})
# Append the residual-diagnostic p-values as extra columns.
metrica_at_MLP_train['Ljung-Box p-value'] = pd.Series([ljung_box_pval_MLP_train7_at], index=metrica_at_MLP_train.index)
metrica_at_MLP_train['Jarque-Bera p-value'] = pd.Series([jarque_bera_pval_MLP_train7_at], index=metrica_at_MLP_train.index)
# Display the resulting metrics table (notebook cell output).
metrica_at_MLP_train
| SSE | MAPE | MAD | MSD | R2 | Ljung-Box p-value | Jarque-Bera p-value | |
|---|---|---|---|---|---|---|---|
| MLP Entrenamiento Retorno Acumulado τ = 7 | 3105.4987 | 4.87% | 0.69 | 0.62 | 98.32% | 0.0 | 1.4771e-18 |
Conjunto de datos de Prueba:
# Residual diagnostics on the test split (Ljung-Box and Jarque-Bera p-values).
ljung_box_pvalMLP7_at, jarque_bera_pvalMLP7_at = evaluate_residuals(data_test_plot_at7, test_preds_at_MLP7)
No se rechaza H0: los residuales son independientes (no correlacionados).
No se rechaza H0: los residuales siguen una distribución normal.
# Compute the error metrics (SSE, MAPE, MAD, MSD, R2) on the test split.
metrica_MLP7_test_at = metricas(y_test_at7,test_preds_at_MLP7)
# Relabel row 0 with a descriptive name for the summary table.
metrica_MLP7_test_at.index = metrica_MLP7_test_at.index.map({0: 'MLP Prueba Retorno Acumulado τ = 7'})
# Append the residual-diagnostic p-values as extra columns.
metrica_MLP7_test_at['Ljung-Box p-value'] = pd.Series([ljung_box_pvalMLP7_at], index=metrica_MLP7_test_at.index)
metrica_MLP7_test_at['Jarque-Bera p-value'] = pd.Series([jarque_bera_pvalMLP7_at], index=metrica_MLP7_test_at.index)
# Display the resulting metrics table (notebook cell output).
metrica_MLP7_test_at
| SSE | MAPE | MAD | MSD | R2 | Ljung-Box p-value | Jarque-Bera p-value | |
|---|---|---|---|---|---|---|---|
| MLP Prueba Retorno Acumulado τ = 7 | 0.0732 | 0.41% | 0.1 | 0.01 | -775.94% | 0.182 | 0.7553 |
Curva Runs vs Error/Score :
# Plot the validation-loss curve over the training runs for the MLP7 history.
plot_best_model_validation_loss(history_at_MPL7)
Boxplot Errores Conjunto de Entrenamiento, Validación y Prueba :
# Boxplots of the residuals for the train, validation and test splits.
errores_plots(y_train_at7, train_preds_at_MLP7, y_val_at7, val_preds_at_MLP7, y_test_at7, test_preds_at_MLP7)
De acuerdo a los resultados obtenidos del gráfico de Run vs Error/Score y al compararlo con el gráfico boxplot de los residuales se observa lo siguiente:
Conjunto de Entrenamiento: La media de los residuales se encuentra muy cercana al error/score correspondiente al val_loss de la época del mejor modelo.
Conjunto de Validación: La media de los residuales se encuentra alejada del error/score correspondiente al val_loss de la época del mejor modelo.
Conjunto de Prueba: La media de los residuales se encuentra alejada del error/score correspondiente al val_loss de la época del mejor modelo. De igual forma se observa una alta variabilidad en los residuales.
Lo anterior es coherente con las métricas obtenidas para el modelo implementado.
Horizonte de 14 días (\(\tau=14\))#
A continuación, se lleva a cabo la búsqueda de los hiperparámetros que optimizan nuestro modelo utilizando Keras y GridSearch.
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # Suppress all TensorFlow info and warning logs.
def create_mlp_model(activation='tanh',learning_rate=0.001):
    """Build and compile a small MLP regressor for 14-feature inputs.

    Architecture: Input(14) -> Dense(32) -> Dense(16) -> Dense(16)
    -> Dropout(0.2) -> Dense(1, linear), trained with MAE loss and
    the legacy Adam optimizer.

    Parameters
    ----------
    activation : str
        Activation function applied to every hidden Dense layer.
    learning_rate : float
        Step size for the Adam optimizer.

    Returns
    -------
    A compiled Keras Model.
    """
    inputs = Input(shape=(14,), dtype='float32')
    # Stack the three hidden layers; each feeds the next.
    x = inputs
    for units in (32, 16, 16):
        x = Dense(units, activation=activation)(x)
    # Dropout regularizes the representation before the linear head.
    x = Dropout(0.2)(x)
    outputs = Dense(1, activation='linear')(x)
    mlp = Model(inputs=inputs, outputs=outputs)
    mlp.compile(loss='mean_absolute_error',
                optimizer=tf.keras.optimizers.legacy.Adam(learning_rate=learning_rate))
    return mlp
# Grid Search configuration: wrap the Keras builder so sklearn can cross-validate it.
# NOTE(review): `epochs=20` here is overridden by the 'epochs' entry in param_grid.
model = KerasRegressor(build_fn=create_mlp_model, epochs=20, batch_size=16, verbose=0)
param_grid = {
'activation': ['relu'], # activations to try; full sweep would be ['relu', 'tanh', 'sigmoid']
'epochs' : [50], # epoch counts to try; full sweep would be [20, 50, 100, 200, 300]
'learning_rate' : [0.001] # learning rates to try; full sweep would be [0.001, 0.01, 0.1, 0.2, 0.3]
}
# 5-fold shuffled CV over the (currently single-point) hyperparameter grid.
grid = GridSearchCV(estimator=model, param_grid=param_grid, n_jobs=-1, cv=KFold(n_splits=5, shuffle=True), verbose =2)
# Fit the grid search on the tau=14 accumulated-return training set.
grid_result = grid.fit(X_train_at14, y_train_at14)
# Report the winning hyperparameters and CV score.
print(f"Mejor función de activación: {grid_result.best_params_['activation']}")
print(f"Mejor número de epocas: {grid_result.best_params_['epochs']}")
print(f"Mejor Longitud de paso μ (Learning Rate): {grid_result.best_params_['learning_rate']}")
print(f"Mejor Puntuación: {grid_result.best_score_}")
Fitting 5 folds for each of 1 candidates, totalling 5 fits
[CV] END ....activation=relu, epochs=50, learning_rate=0.001; total time= 8.9s
[CV] END ....activation=relu, epochs=50, learning_rate=0.001; total time= 8.9s
[CV] END ....activation=relu, epochs=50, learning_rate=0.001; total time= 9.1s
[CV] END ....activation=relu, epochs=50, learning_rate=0.001; total time= 9.2s
[CV] END ....activation=relu, epochs=50, learning_rate=0.001; total time= 9.3s
Mejor función de activación: relu
Mejor número de epocas: 50
Mejor Longitud de paso μ (Learning Rate): 0.001
Mejor Puntuación: -0.6532162189483642
A continuación, configuramos la red MLP empleando la API funcional de Keras. Con este método, una capa puede ser designada como la entrada para la siguiente capa en el momento de su definición.
En este contexto, Input es una función utilizada para establecer una capa de entrada en un modelo de red neuronal. La propiedad shape=(14,) define la estructura de los datos de entrada, lo que indica que estos tendrán 14 dimensiones. Por otro lado, dtype='float32' especifica que los elementos de esta capa serán números de punto flotante de 32 bits
A continuación se define la función build_models para la generación de modelos de acuerdo con los parámetros indexados a través de la lista.
La indexación de parámetros de la función build_models_mlp se realiza con base en lo indicado en el numeral 3 del parcial práctico.
# Hyperparameter sweep inputs for the tau=14 model family.
input_shape14 = 14  # number of input features (14-day window)
neurons_list = [10, 100, 1000, 10000]
dropout_rates = [0.2, 0.4, 0.6, 0.8]
# Build one model per (neurons, dropout) combination with ReLU activations.
# NOTE(review): the printed summaries below report identical layer sizes
# (32/16/16) and 1,297 params for every configuration — verify that
# build_models_mlp actually applies `neurons_list` and `dropout_rates`.
models_MLP14_at = build_models_mlp(input_shape14, neurons_list, dropout_rates, 'relu')
Modelo con 10 neuronas y dropout 0.2:
Model: "model_154"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_155 (InputLayer) [(None, 14)] 0
dense_412 (Dense) (None, 32) 480
dense_413 (Dense) (None, 16) 528
dense_414 (Dense) (None, 16) 272
dropout_154 (Dropout) (None, 16) 0
dense_415 (Dense) (None, 1) 17
=================================================================
Total params: 1,297
Trainable params: 1,297
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10 neuronas y dropout 0.4:
Model: "model_155"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_156 (InputLayer) [(None, 14)] 0
dense_416 (Dense) (None, 32) 480
dense_417 (Dense) (None, 16) 528
dense_418 (Dense) (None, 16) 272
dropout_155 (Dropout) (None, 16) 0
dense_419 (Dense) (None, 1) 17
=================================================================
Total params: 1,297
Trainable params: 1,297
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10 neuronas y dropout 0.6:
Model: "model_156"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_157 (InputLayer) [(None, 14)] 0
dense_420 (Dense) (None, 32) 480
dense_421 (Dense) (None, 16) 528
dense_422 (Dense) (None, 16) 272
dropout_156 (Dropout) (None, 16) 0
dense_423 (Dense) (None, 1) 17
=================================================================
Total params: 1,297
Trainable params: 1,297
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10 neuronas y dropout 0.8:
Model: "model_157"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_158 (InputLayer) [(None, 14)] 0
dense_424 (Dense) (None, 32) 480
dense_425 (Dense) (None, 16) 528
dense_426 (Dense) (None, 16) 272
dropout_157 (Dropout) (None, 16) 0
dense_427 (Dense) (None, 1) 17
=================================================================
Total params: 1,297
Trainable params: 1,297
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 100 neuronas y dropout 0.2:
Model: "model_158"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_159 (InputLayer) [(None, 14)] 0
dense_428 (Dense) (None, 32) 480
dense_429 (Dense) (None, 16) 528
dense_430 (Dense) (None, 16) 272
dropout_158 (Dropout) (None, 16) 0
dense_431 (Dense) (None, 1) 17
=================================================================
Total params: 1,297
Trainable params: 1,297
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 100 neuronas y dropout 0.4:
Model: "model_159"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_160 (InputLayer) [(None, 14)] 0
dense_432 (Dense) (None, 32) 480
dense_433 (Dense) (None, 16) 528
dense_434 (Dense) (None, 16) 272
dropout_159 (Dropout) (None, 16) 0
dense_435 (Dense) (None, 1) 17
=================================================================
Total params: 1,297
Trainable params: 1,297
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 100 neuronas y dropout 0.6:
Model: "model_160"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_161 (InputLayer) [(None, 14)] 0
dense_436 (Dense) (None, 32) 480
dense_437 (Dense) (None, 16) 528
dense_438 (Dense) (None, 16) 272
dropout_160 (Dropout) (None, 16) 0
dense_439 (Dense) (None, 1) 17
=================================================================
Total params: 1,297
Trainable params: 1,297
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 100 neuronas y dropout 0.8:
Model: "model_161"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_162 (InputLayer) [(None, 14)] 0
dense_440 (Dense) (None, 32) 480
dense_441 (Dense) (None, 16) 528
dense_442 (Dense) (None, 16) 272
dropout_161 (Dropout) (None, 16) 0
dense_443 (Dense) (None, 1) 17
=================================================================
Total params: 1,297
Trainable params: 1,297
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 1000 neuronas y dropout 0.2:
Model: "model_162"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_163 (InputLayer) [(None, 14)] 0
dense_444 (Dense) (None, 32) 480
dense_445 (Dense) (None, 16) 528
dense_446 (Dense) (None, 16) 272
dropout_162 (Dropout) (None, 16) 0
dense_447 (Dense) (None, 1) 17
=================================================================
Total params: 1,297
Trainable params: 1,297
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 1000 neuronas y dropout 0.4:
Model: "model_163"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_164 (InputLayer) [(None, 14)] 0
dense_448 (Dense) (None, 32) 480
dense_449 (Dense) (None, 16) 528
dense_450 (Dense) (None, 16) 272
dropout_163 (Dropout) (None, 16) 0
dense_451 (Dense) (None, 1) 17
=================================================================
Total params: 1,297
Trainable params: 1,297
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 1000 neuronas y dropout 0.6:
Model: "model_164"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_165 (InputLayer) [(None, 14)] 0
dense_452 (Dense) (None, 32) 480
dense_453 (Dense) (None, 16) 528
dense_454 (Dense) (None, 16) 272
dropout_164 (Dropout) (None, 16) 0
dense_455 (Dense) (None, 1) 17
=================================================================
Total params: 1,297
Trainable params: 1,297
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 1000 neuronas y dropout 0.8:
Model: "model_165"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_166 (InputLayer) [(None, 14)] 0
dense_456 (Dense) (None, 32) 480
dense_457 (Dense) (None, 16) 528
dense_458 (Dense) (None, 16) 272
dropout_165 (Dropout) (None, 16) 0
dense_459 (Dense) (None, 1) 17
=================================================================
Total params: 1,297
Trainable params: 1,297
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10000 neuronas y dropout 0.2:
Model: "model_166"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_167 (InputLayer) [(None, 14)] 0
dense_460 (Dense) (None, 32) 480
dense_461 (Dense) (None, 16) 528
dense_462 (Dense) (None, 16) 272
dropout_166 (Dropout) (None, 16) 0
dense_463 (Dense) (None, 1) 17
=================================================================
Total params: 1,297
Trainable params: 1,297
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10000 neuronas y dropout 0.4:
Model: "model_167"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_168 (InputLayer) [(None, 14)] 0
dense_464 (Dense) (None, 32) 480
dense_465 (Dense) (None, 16) 528
dense_466 (Dense) (None, 16) 272
dropout_167 (Dropout) (None, 16) 0
dense_467 (Dense) (None, 1) 17
=================================================================
Total params: 1,297
Trainable params: 1,297
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10000 neuronas y dropout 0.6:
Model: "model_168"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_169 (InputLayer) [(None, 14)] 0
dense_468 (Dense) (None, 32) 480
dense_469 (Dense) (None, 16) 528
dense_470 (Dense) (None, 16) 272
dropout_168 (Dropout) (None, 16) 0
dense_471 (Dense) (None, 1) 17
=================================================================
Total params: 1,297
Trainable params: 1,297
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10000 neuronas y dropout 0.8:
Model: "model_169"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_170 (InputLayer) [(None, 14)] 0
dense_472 (Dense) (None, 32) 480
dense_473 (Dense) (None, 16) 528
dense_474 (Dense) (None, 16) 272
dropout_169 (Dropout) (None, 16) 0
dense_475 (Dense) (None, 1) 17
=================================================================
Total params: 1,297
Trainable params: 1,297
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Para entrenar el modelo, se utiliza la función fit() en el objeto del modelo, proporcionándole como argumentos X_train y y_train. El proceso de entrenamiento se lleva a cabo a lo largo de un número específico de épocas. Además, el parámetro batch_size especifica cuántas muestras del conjunto de entrenamiento se usarán en cada iteración de retropropagación (backpropagation).
El conjunto de datos de validación se utiliza para evaluar el rendimiento del modelo al finalizar cada época. Se emplea un objeto ModelCheckpoint que monitorea la función de pérdida en el conjunto de validación y almacena el modelo correspondiente a la época en la que esta función alcanza su valor más bajo.
Se define val_loss como el valor de la función de coste para los datos de validación cruzada y loss como el valor de la función de coste para los datos de entrenamiento. period=1 indica que el modelo se evaluará y potencialmente se guardará después de cada época.
from tensorflow.keras.callbacks import ModelCheckpoint
# Checkpoint path template: Keras fills in the epoch number and validation loss.
save_weights = os.path.join('keras_models', 'PRSA_data_at_MLP14_weights.{epoch:02d}-{val_loss:.4f}.keras')
# Save (full model, not just weights) only when val_loss improves, once per epoch.
save_best14_at = ModelCheckpoint(save_weights, monitor='val_loss', verbose=2,
save_best_only=True, save_weights_only=False, mode='min', save_freq='epoch');
A continuación se itera sobre cada uno de los modelos del objeto models_MLP14_at.
import os
from joblib import dump, load

history_at_MPL14 = []
# Train (or reload) every tau=14 MLP variant. Each training history is cached
# to disk with joblib so re-running the notebook skips the expensive fits.
for i, model in enumerate(models_MLP14_at):
    filename = f'history_at_MPL14_model_{i}.joblib'
    if os.path.exists(filename):
        # Cached history found: load it instead of retraining.
        model_history = load(filename)
        # BUG FIX: the message printed a literal placeholder instead of the
        # actual file name (cf. the cell output below) — interpolate `filename`.
        print(f"El archivo '{filename}' ya existe. Se ha cargado el historial del entrenamiento.")
    else:
        # Fresh training run; the ModelCheckpoint callback keeps the best epoch.
        model_history = model.fit(x=X_train_at14, y=y_train_at14, batch_size=16, epochs=50,
                                  verbose=2, callbacks=[save_best14_at], validation_data=(X_val_at14, y_val_at14),
                                  shuffle=True)
        # Persist only the metrics dict — History objects are not picklable as-is.
        dump(model_history.history, filename)
        print(f"El entrenamiento del modelo {i + 1} se ha completado y el historial ha sido guardado en '{filename}'.")
    # Normalize: joblib-loaded entries are plain dicts, fresh fits are History objects.
    history_at_MPL14.append(model_history if isinstance(model_history, dict) else model_history.history)
El archivo 'history_at_MPL14_model_0.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_at_MPL14_model_1.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_at_MPL14_model_2.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_at_MPL14_model_3.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_at_MPL14_model_4.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_at_MPL14_model_5.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_at_MPL14_model_6.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_at_MPL14_model_7.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_at_MPL14_model_8.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_at_MPL14_model_9.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_at_MPL14_model_10.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_at_MPL14_model_11.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_at_MPL14_model_12.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_at_MPL14_model_13.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_at_MPL14_model_14.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_at_MPL14_model_15.joblib' ya existe. Se ha cargado el historial del entrenamiento.
En este caso, el parámetro verbose=2 indica que se mostrará una línea por cada época durante el entrenamiento. Los valores posibles son: 0 para no mostrar información, 1 para visualizar una barra de progreso y 2 para ver un registro por época.
Además, las muestras se mezclan de manera aleatoria antes de cada época, gracias a la opción shuffle=True.
Una vez completado el entrenamiento, se realizan predicciones sobre el Precio del Bitcoin utilizando los mejores modelos guardados. Las predicciones generadas, que corresponden al Precio del Bitcoin escalado, son transformadas inversamente para recuperar los valores originales. Asimismo, se evalúa la calidad del ajuste mediante métricas como SSE, MAPE, MAD, MSD y R2.
import os
import re
from tensorflow.keras.models import load_model
import numpy as np

# Directory holding every saved checkpoint.
model_dir = 'keras_models'
files = os.listdir(model_dir)
# Checkpoint file names encode "<epoch>-<val_loss>", e.g.
# "PRSA_data_at_MLP14_weights.29-0.0323.keras".
pattern = r"PRSA_data_at_MLP14_weights\.(\d+)-([\d\.]+)\.keras"
best_val_loss = float('inf')
best_model_file = None
best_model14_at = None
# Scan the directory and keep the checkpoint with the lowest validation loss.
for file in files:
    match = re.match(pattern, file)
    if match:
        # FIX: dropped the unused `epoch = int(match.group(1))` local —
        # only the validation loss drives the selection.
        val_loss = float(match.group(2))
        if val_loss < best_val_loss:
            best_val_loss = val_loss
            best_model_file = file
# Load the winning checkpoint, if any file matched the pattern.
if best_model_file:
    best_model_path = os.path.join(model_dir, best_model_file)
    print(f"Cargando el mejor modelo: {best_model_file} con val_loss: {best_val_loss}")
    best_model14_at = load_model(best_model_path)
    if best_model14_at is None:
        print("Error: No se pudo cargar el mejor modelo.")
else:
    print("No se encontraron archivos de modelos que coincidan con el patrón.")
Cargando el mejor modelo: PRSA_data_at_MLP14_weights.29-0.0323.keras con val_loss: 0.0323
A continuación estimamos las predicciones del modelo MLP para la época en donde la función de pérdida alcanza su valor más bajo.
# Show every checkpoint file found in the models directory (for inspection).
print("Archivos en el directorio 'keras_models':", files)
Archivos en el directorio 'keras_models': ['PRSA_data_vola14_MLP7_weights.04-0.0018.keras', 'PRSA_data_price_MLP21_weights.01-3579.4128.keras', 'PRSA_data_price_LSTM21_weights.01-43062.3047.keras', 'PRSA_data_price_LSTM28_weights.02-42892.2305.keras', 'PRSA_data_price_LSTM_weights.19-67016.3047.keras', 'PRSA_data_vola14_MLP21_weights.20-0.0011.keras', 'PRSA_data_price_LSTM14_weights.06-51394.8750.keras', 'PRSA_data_vola21_LSTM7_weights.04-0.0012.keras', 'PRSA_data_vola7_MLP28_weights.07-0.0033.keras', 'PRSA_data_price_LSTM21_weights.02-43052.0469.keras', 'PRSA_data_at_MLP7_weights.80-0.0780.keras', 'PRSA_data_vola21_LSTM28_weights.02-0.0022.keras', 'PRSA_data_vola21_LSTM14_weights.04-0.0011.keras', 'PRSA_data_vola7_LSTM21_weights.18-0.0029.keras', 'PRSA_data_vola21_LSTM7_weights.08-0.0013.keras', 'PRSA_data_vola28_MLP14_weights.04-0.0013.keras', 'PRSA_data_price_MLP14_weights.52-626.2623.keras', 'PRSA_data_vola7_MLP7_weights.15-0.0041.keras', 'PRSA_data_price_LSTM14_weights.13-51325.8516.keras', 'PRSA_data_vola14_MLP7_weights.02-0.0023.keras', 'PRSA_data_price_LSTM21_weights.11-42962.6562.keras', 'PRSA_data_vola7_LSTM28_weights.10-0.0037.keras', 'PRSA_data_at_LSTM_weights.04-1.2071.keras', 'PRSA_data_price_MLP7_weights.50-1342.5273.keras', 'PRSA_data_price_MLP14_weights.03-5108.7837.keras', 'PRSA_data_vola28_MLP7_weights.14-0.0012.keras', 'PRSA_data_at_MLP7_weights.11-2.4883.keras', 'PRSA_data_price_LSTM28_weights.08-42832.5430.keras', 'PRSA_data_price_MLP28_weights.01-9242.6553.keras', 'PRSA_data_vola21_LSTM28_weights.04-0.0019.keras', 'PRSA_data_price_MLP21_weights.16-1013.2251.keras', 'PRSA_data_price_MLP28_weights.13-1042.5178.keras', 'PRSA_data_vola28_MLP28_weights.34-0.0006.keras', 'PRSA_data_at_MLP7_weights.01-19.6635.keras', 'PRSA_data_at_MLP7_weights.19-1.8379.keras', 'PRSA_data_at_MLP7_weights.12-2.3991.keras', 'PRSA_data_price_MLP7_weights.15-1346.3275.keras', 'PRSA_data_vola14_MLP28_weights.02-0.0024.keras', 
'PRSA_data_vola14_MLP28_weights.07-0.0017.keras', 'PRSA_data_at_MLP28_weights.05-0.0930.keras', 'PRSA_data_price_LSTM14_weights.20-51256.7266.keras', 'PRSA_data_vola21_LSTM21_weights.05-0.0013.keras', 'PRSA_data_price_MLP14_weights.12-1304.7009.keras', 'PRSA_data_vola7_LSTM14_weights.09-0.0044.keras', 'PRSA_data_vola28_MLP28_weights.50-0.0006.keras', 'PRSA_data_vola7_MLP14_weights.15-0.0020.keras', 'PRSA_data_price_MLP14_weights.08-1386.5365.keras', 'PRSA_data_price_LSTM21_weights.09-42982.2383.keras', 'PRSA_data_price_MLP28_weights.11-7366.8994.keras', 'PRSA_data_vola14_MLP7_weights.18-0.0015.keras', 'PRSA_data_price_MLP7_weights.14-1734.7238.keras', 'PRSA_data_price_LSTM14_weights.15-51306.2070.keras', 'PRSA_data_vola21_MLP28_weights.25-0.0010.keras', 'PRSA_data_at_LSTM14_weights.02-2.2175.keras', 'PRSA_data_vola28_LSTM7_weights.08-0.0014.keras', 'PRSA_data_price_MLP28_weights.10-1107.4047.keras', 'PRSA_data_vola28_MLP7_weights.01-0.0068.keras', 'PRSA_data_vola21_LSTM21_weights.03-0.0015.keras', 'PRSA_data_price_MLP7_weights.01-9253.3047.keras', 'PRSA_data_vola21_MLP14_weights.31-0.0007.keras', 'PRSA_data_price_LSTM28_weights.12-42793.6172.keras', 'PRSA_data_price_LSTM14_weights.09-51365.0000.keras', 'PRSA_data_vola28_MLP28_weights.42-0.0006.keras', 'PRSA_data_price_MLP7_weights.05-4792.5054.keras', 'PRSA_data_vola21_LSTM_weights.01-0.0024.keras', 'PRSA_data_price_LSTM28_weights.17-42745.0352.keras', 'PRSA_data_vola28_MLP28_weights.45-0.0006.keras', 'PRSA_data_vola28_MLP14_weights.08-0.0010.keras', 'PRSA_data_at_LSTM_weights.11-0.6498.keras', 'PRSA_data_vola28_MLP28_weights.39-0.0007.keras', 'PRSA_data_at_MLP21_weights.11-0.0326.keras', 'PRSA_data_price_LSTM_weights.20-67006.4141.keras', 'PRSA_data_vola28_MLP7_weights.06-0.0014.keras', 'PRSA_data_at_LSTM14_weights.06-0.8145.keras', 'PRSA_data_at_MLP28_weights.01-2.1902.keras', 'PRSA_data_price_MLP14_weights.46-814.1992.keras', '.DS_Store', 'PRSA_data_vola28_LSTM28_weights.12-0.0006.keras', 
'PRSA_data_price_LSTM14_weights.18-51276.5625.keras', 'PRSA_data_vola21_LSTM14_weights.01-0.0078.keras', 'PRSA_data_vola14_MLP21_weights.04-0.0015.keras', 'PRSA_data_vola21_LSTM7_weights.02-0.0012.keras', 'PRSA_data_vola21_LSTM14_weights.02-0.0011.keras', 'PRSA_data_at_MLP7_weights.09-2.9793.keras', 'PRSA_data_vola21_MLP14_weights.06-0.0007.keras', 'PRSA_data_vola14_MLP21_weights.78-0.0010.keras', 'PRSA_data_price_MLP14_weights.64-711.9107.keras', 'PRSA_data_vola14_MLP28_weights.31-0.0011.keras', 'PRSA_data_vola14_LSTM_weights.08-0.0020.keras', 'PRSA_data_vola14_LSTM21_weights.02-0.0018.keras', 'PRSA_data_vola14_LSTM28_weights.17-0.0014.keras', 'PRSA_data_vola14_MLP14_weights.29-0.0005.keras', 'PRSA_data_price_LSTM_weights.03-67175.7734.keras', 'PRSA_data_vola14_LSTM_weights.01-0.0068.keras', 'PRSA_data_price_LSTM28_weights.11-42803.3086.keras', 'PRSA_data_price_MLP28_weights.15-7107.3306.keras', 'PRSA_data_vola14_MLP21_weights.49-0.0010.keras', 'PRSA_data_vola28_MLP7_weights.07-0.0010.keras', 'PRSA_data_vola21_LSTM7_weights.02-0.0010.keras', 'PRSA_data_at_LSTM14_weights.15-0.4082.keras', 'PRSA_data_vola28_MLP7_weights.19-0.0011.keras', 'PRSA_data_vola14_MLP14_weights.06-0.0006.keras', 'PRSA_data_vola14_LSTM21_weights.06-0.0012.keras', 'PRSA_data_price_LSTM_weights.17-67036.2891.keras', 'PRSA_data_vola28_MLP7_weights.03-0.0018.keras', 'PRSA_data_vola7_MLP28_weights.02-0.0039.keras', 'PRSA_data_vola7_LSTM28_weights.16-0.0035.keras', 'PRSA_data_vola21_MLP7_weights.20-0.0011.keras', 'PRSA_data_vola21_LSTM7_weights.01-0.0018.keras', 'PRSA_data_vola21_MLP21_weights.33-0.0009.keras', 'PRSA_data_price_MLP21_weights.64-870.7020.keras', 'PRSA_data_vola21_MLP14_weights.22-0.0007.keras', 'PRSA_data_vola7_MLP14_weights.14-0.0020.keras', 'PRSA_data_price_LSTM21_weights.17-42903.9648.keras', 'PRSA_data_vola21_MLP14_weights.01-0.0021.keras', 'PRSA_data_vola21_LSTM7_weights.16-0.0011.keras', 'PRSA_data_at_LSTM14_weights.11-0.6656.keras', 
'PRSA_data_vola28_MLP7_weights.02-0.0019.keras', 'PRSA_data_vola14_MLP28_weights.05-0.0018.keras', 'PRSA_data_vola14_LSTM14_weights.08-0.0005.keras', 'PRSA_data_at_MLP21_weights.02-0.8171.keras', 'PRSA_data_price_MLP28_weights.02-8134.2202.keras', 'PRSA_data_price_LSTM28_weights.06-42852.3320.keras', 'PRSA_data_vola7_MLP14_weights.02-0.0036.keras', 'PRSA_data_vola14_LSTM28_weights.01-0.0026.keras', 'PRSA_data_at_MLP7_weights.04-9.8837.keras', 'PRSA_data_price_MLP28_weights.16-1274.4368.keras', 'PRSA_data_price_LSTM21_weights.07-43002.0078.keras', 'PRSA_data_vola7_MLP28_weights.23-0.0032.keras', 'PRSA_data_price_LSTM_weights.01-67196.3359.keras', 'PRSA_data_at_MLP28_weights.02-1.9156.keras', 'PRSA_data_vola14_MLP21_weights.02-0.0016.keras', 'PRSA_data_vola28_MLP14_weights.14-0.0007.keras', 'PRSA_data_price_LSTM28_weights.07-42842.4375.keras', 'PRSA_data_price_MLP14_weights.37-825.1805.keras', 'PRSA_data_vola7_MLP14_weights.07-0.0021.keras', 'PRSA_data_price_LSTM14_weights.04-51414.8945.keras', 'PRSA_data_price_MLP7_weights.42-1446.1339.keras', 'PRSA_data_price_MLP21_weights.57-932.6497.keras', 'PRSA_data_price_LSTM14_weights.19-51266.6133.keras', 'PRSA_data_vola21_MLP14_weights.34-0.0008.keras', 'PRSA_data_price_MLP14_weights.01-10485.8262.keras', 'PRSA_data_vola21_LSTM7_weights.08-0.0012.keras', 'PRSA_data_at_LSTM_weights.02-2.4966.keras', 'PRSA_data_vola21_LSTM28_weights.15-0.0009.keras', 'PRSA_data_price_LSTM_weights.20-67006.3047.keras', 'PRSA_data_price_LSTM_weights.14-67066.1016.keras', 'PRSA_data_price_LSTM21_weights.19-42884.2422.keras', 'PRSA_data_vola28_LSTM21_weights.18-0.0008.keras', 'PRSA_data_vola21_LSTM7_weights.04-0.0013.keras', 'PRSA_data_price_LSTM28_weights.20-42714.5508.keras', 'PRSA_data_at_LSTM14_weights.13-0.5609.keras', 'PRSA_data_vola28_LSTM28_weights.04-0.0009.keras', 'PRSA_data_vola14_MLP21_weights.01-0.0118.keras', 'PRSA_data_vola7_MLP21_weights.02-0.0032.keras', 'PRSA_data_at_MLP7_weights.74-0.2838.keras', 
'PRSA_data_price_MLP28_weights.17-1217.0461.keras', 'PRSA_data_at_MLP21_weights.16-1.1973.keras', 'PRSA_data_vola7_MLP7_weights.02-0.0036.keras', 'PRSA_data_vola21_LSTM7_weights.02-0.0015.keras', 'PRSA_data_vola14_LSTM14_weights.04-0.0006.keras', 'PRSA_data_vola28_LSTM28_weights.18-0.0006.keras', 'PRSA_data_price_MLP21_weights.74-805.9489.keras', 'PRSA_data_vola21_MLP14_weights.02-0.0011.keras', 'PRSA_data_vola28_MLP21_weights.02-0.0012.keras', 'PRSA_data_vola21_LSTM_weights.11-0.0014.keras', 'PRSA_data_vola7_LSTM14_weights.14-0.0036.keras', 'PRSA_data_at_MLP28_weights.03-1.7712.keras', 'PRSA_data_price_LSTM21_weights.20-42874.3555.keras', 'PRSA_data_vola28_LSTM7_weights.20-0.0010.keras', 'PRSA_data_vola28_MLP21_weights.04-0.0009.keras', 'PRSA_data_at_MLP7_weights.08-3.6716.keras', 'PRSA_data_at_MLP21_weights.04-0.0734.keras', 'PRSA_data_vola21_LSTM28_weights.03-0.0021.keras', 'PRSA_data_vola21_MLP14_weights.35-0.0008.keras', 'PRSA_data_vola28_LSTM7_weights.11-0.0012.keras', 'PRSA_data_price_LSTM28_weights.04-42872.1953.keras', 'PRSA_data_at_LSTM_weights.08-0.4803.keras', 'PRSA_data_vola28_MLP14_weights.05-0.0010.keras', 'PRSA_data_price_LSTM14_weights.14-51316.0625.keras', 'PRSA_data_vola7_MLP14_weights.01-0.0101.keras', 'PRSA_data_vola14_LSTM28_weights.17-0.0015.keras', 'PRSA_data_vola7_LSTM_weights.01-0.0040.keras', 'PRSA_data_vola14_MLP21_weights.09-0.0011.keras', 'PRSA_data_price_LSTM21_weights.05-43021.9570.keras', 'PRSA_data_vola28_MLP28_weights.03-0.0007.keras', 'PRSA_data_vola7_LSTM14_weights.10-0.0042.keras', 'PRSA_data_price_LSTM_weights.07-67135.3359.keras', 'PRSA_data_vola7_MLP14_weights.18-0.0020.keras', 'PRSA_data_price_LSTM21_weights.18-42894.1055.keras', 'PRSA_data_vola7_MLP7_weights.01-0.0042.keras', 'PRSA_data_vola14_LSTM_weights.05-0.0024.keras', 'PRSA_data_vola21_MLP14_weights.27-0.0008.keras', 'PRSA_data_vola28_MLP28_weights.02-0.0018.keras', 'PRSA_data_vola21_LSTM7_weights.03-0.0017.keras', 'PRSA_data_price_LSTM_weights.10-67105.4297.keras', 
'PRSA_data_vola28_MLP7_weights.19-0.0010.keras', 'PRSA_data_vola7_MLP21_weights.03-0.0030.keras', 'PRSA_data_at_MLP7_weights.06-5.3724.keras', 'PRSA_data_at_LSTM_weights.05-1.1226.keras', 'PRSA_data_vola21_LSTM7_weights.01-0.0026.keras', 'PRSA_data_vola21_LSTM28_weights.05-0.0018.keras', 'PRSA_data_vola14_MLP21_weights.05-0.0012.keras', 'PRSA_data_vola21_LSTM28_weights.14-0.0010.keras', 'PRSA_data_vola21_MLP14_weights.08-0.0010.keras', 'PRSA_data_vola21_MLP28_weights.47-0.0008.keras', 'PRSA_data_price_LSTM14_weights.16-51296.3633.keras', 'PRSA_data_at_MLP7_weights.02-16.3421.keras', 'PRSA_data_vola7_LSTM21_weights.15-0.0030.keras', 'PRSA_data_vola21_MLP21_weights.09-0.0009.keras', 'PRSA_data_at_LSTM14_weights.01-6.2168.keras', 'PRSA_data_vola28_LSTM21_weights.01-0.0016.keras', 'PRSA_data_vola21_LSTM21_weights.16-0.0009.keras', 'PRSA_data_vola21_LSTM28_weights.07-0.0016.keras', 'PRSA_data_vola21_MLP28_weights.19-0.0010.keras', 'PRSA_data_vola21_LSTM28_weights.14-0.0009.keras', 'PRSA_data_vola14_MLP7_weights.09-0.0018.keras', 'PRSA_data_vola21_MLP21_weights.02-0.0011.keras', 'PRSA_data_vola28_LSTM14_weights.03-0.0007.keras', 'PRSA_data_vola21_LSTM_weights.09-0.0015.keras', 'PRSA_data_vola21_LSTM7_weights.09-0.0012.keras', 'PRSA_data_vola28_MLP21_weights.65-0.0007.keras', 'PRSA_data_vola21_MLP28_weights.15-0.0011.keras', 'PRSA_data_at_MLP7_weights.42-0.8694.keras', 'PRSA_data_vola21_LSTM28_weights.18-0.0008.keras', 'PRSA_data_vola21_MLP14_weights.04-0.0008.keras', 'PRSA_data_vola7_LSTM14_weights.02-0.0040.keras', 'PRSA_data_vola21_LSTM14_weights.12-0.0007.keras', 'PRSA_data_vola7_LSTM21_weights.15-0.0029.keras', 'PRSA_data_vola21_MLP28_weights.01-0.0026.keras', 'PRSA_data_price_LSTM_weights.13-67075.9453.keras', 'PRSA_data_vola14_MLP7_weights.01-0.0041.keras', 'PRSA_data_price_MLP21_weights.02-1476.1034.keras', 'PRSA_data_vola14_LSTM28_weights.19-0.0014.keras', 'PRSA_data_price_MLP14_weights.96-663.0834.keras', 'PRSA_data_vola7_MLP21_weights.94-0.0024.keras', 
'PRSA_data_price_LSTM21_weights.03-43041.9570.keras', 'PRSA_data_price_MLP7_weights.12-3798.9531.keras', 'PRSA_data_price_LSTM28_weights.10-42813.0352.keras', 'PRSA_data_vola14_MLP28_weights.20-0.0014.keras', 'PRSA_data_vola7_MLP21_weights.04-0.0027.keras', 'PRSA_data_vola7_LSTM28_weights.18-0.0035.keras', 'PRSA_data_at_MLP7_weights.10-2.7632.keras', 'PRSA_data_price_LSTM_weights.06-67145.4141.keras', 'PRSA_data_price_LSTM28_weights.13-42783.9805.keras', 'PRSA_data_vola21_MLP7_weights.13-0.0012.keras', 'PRSA_data_price_LSTM28_weights.01-42902.4570.keras', 'PRSA_data_price_LSTM_weights.20-67005.9609.keras', 'PRSA_data_vola7_LSTM21_weights.04-0.0034.keras', 'PRSA_data_at_MLP14_weights.03-0.0461.keras', 'PRSA_data_vola28_MLP28_weights.01-0.0042.keras', 'PRSA_data_vola7_LSTM14_weights.01-0.0455.keras', 'PRSA_data_price_MLP28_weights.17-1182.9105.keras', 'PRSA_data_vola7_MLP14_weights.05-0.0018.keras', 'PRSA_data_vola28_MLP14_weights.38-0.0006.keras', 'PRSA_data_vola28_MLP7_weights.10-0.0013.keras', 'PRSA_data_vola7_LSTM21_weights.01-0.0038.keras', 'PRSA_data_price_LSTM14_weights.17-51286.4570.keras', 'PRSA_data_price_LSTM28_weights.16-42754.8008.keras', 'PRSA_data_price_LSTM14_weights.11-51345.4023.keras', 'PRSA_data_price_MLP14_weights.22-638.4409.keras', 'PRSA_data_vola7_MLP28_weights.32-0.0029.keras', 'PRSA_data_price_MLP7_weights.48-1648.1864.keras', 'PRSA_data_vola28_LSTM28_weights.10-0.0007.keras', 'PRSA_data_vola21_LSTM_weights.03-0.0021.keras', 'PRSA_data_vola21_LSTM21_weights.13-0.0010.keras', 'PRSA_data_at_MLP7_weights.07-4.2627.keras', 'PRSA_data_vola7_LSTM21_weights.17-0.0029.keras', 'PRSA_data_vola21_LSTM28_weights.08-0.0011.keras', 'PRSA_data_price_MLP21_weights.05-1072.5081.keras', 'PRSA_data_vola7_LSTM21_weights.05-0.0030.keras', 'PRSA_data_price_MLP14_weights.29-856.8320.keras', 'PRSA_data_vola14_LSTM21_weights.11-0.0011.keras', 'PRSA_data_price_LSTM21_weights.10-42972.4219.keras', 'PRSA_data_price_LSTM_weights.02-67185.9453.keras', 
'PRSA_data_vola28_MLP14_weights.21-0.0007.keras', 'PRSA_data_vola21_LSTM28_weights.16-0.0009.keras', 'PRSA_data_vola28_LSTM21_weights.10-0.0009.keras', 'PRSA_data_price_LSTM28_weights.15-42764.5898.keras', 'PRSA_data_at_LSTM14_weights.07-0.7059.keras', 'PRSA_data_vola21_LSTM21_weights.01-0.0048.keras', 'PRSA_data_at_LSTM14_weights.03-1.3422.keras', 'PRSA_data_vola14_LSTM14_weights.02-0.0015.keras', 'PRSA_data_vola14_MLP14_weights.02-0.0007.keras', 'PRSA_data_vola7_LSTM21_weights.02-0.0034.keras', 'PRSA_data_vola7_MLP21_weights.21-0.0025.keras', 'PRSA_data_at_MLP7_weights.18-1.8396.keras', 'PRSA_data_price_LSTM_weights.11-67095.6172.keras', 'PRSA_data_at_LSTM14_weights.12-0.6378.keras', 'PRSA_data_vola21_LSTM21_weights.07-0.0012.keras', 'PRSA_data_vola7_MLP28_weights.45-0.0030.keras', 'PRSA_data_at_MLP7_weights.26-0.8897.keras', 'PRSA_data_vola21_MLP7_weights.01-0.0014.keras', 'PRSA_data_price_MLP14_weights.19-982.5739.keras', 'PRSA_data_vola14_LSTM_weights.16-0.0017.keras', 'PRSA_data_price_MLP21_weights.94-813.8954.keras', 'PRSA_data_vola7_LSTM21_weights.20-0.0029.keras', 'PRSA_data_at_LSTM_weights.03-1.4842.keras', 'PRSA_data_vola21_MLP28_weights.02-0.0015.keras', 'PRSA_data_price_LSTM21_weights.12-42952.9453.keras', 'PRSA_data_price_MLP28_weights.17-1019.2816.keras', 'PRSA_data_vola14_LSTM_weights.13-0.0019.keras', 'PRSA_data_vola7_LSTM_weights.02-0.0038.keras', 'PRSA_data_vola7_LSTM28_weights.02-0.0042.keras', 'PRSA_data_at_LSTM14_weights.13-0.2270.keras', 'PRSA_data_price_LSTM_weights.18-67026.2891.keras', 'PRSA_data_vola28_MLP21_weights.66-0.0007.keras', 'PRSA_data_at_MLP14_weights.42-0.0373.keras', 'PRSA_data_vola7_MLP14_weights.38-0.0020.keras', 'PRSA_data_vola7_LSTM21_weights.11-0.0029.keras', 'PRSA_data_vola21_LSTM21_weights.12-0.0010.keras', 'PRSA_data_at_LSTM_weights.14-0.5158.keras', 'PRSA_data_vola14_LSTM21_weights.01-0.0019.keras', 'PRSA_data_vola28_LSTM21_weights.11-0.0009.keras', 'PRSA_data_vola7_LSTM28_weights.05-0.0038.keras', 
'PRSA_data_vola14_MLP14_weights.10-0.0005.keras', 'PRSA_data_at_MLP21_weights.03-2.1540.keras', 'PRSA_data_at_LSTM_weights.16-0.5377.keras', 'PRSA_data_vola21_LSTM7_weights.13-0.0013.keras', 'PRSA_data_vola21_MLP28_weights.44-0.0008.keras', 'PRSA_data_at_MLP7_weights.16-1.9941.keras', 'PRSA_data_vola28_LSTM28_weights.01-0.0010.keras', 'PRSA_data_vola28_LSTM14_weights.19-0.0006.keras', 'PRSA_data_price_LSTM21_weights.14-42933.4180.keras', 'PRSA_data_vola14_LSTM28_weights.04-0.0019.keras', 'PRSA_data_price_MLP21_weights.82-828.7976.keras', 'PRSA_data_vola28_MLP21_weights.01-0.0030.keras', 'PRSA_data_price_LSTM14_weights.10-51355.2070.keras', 'PRSA_data_vola21_MLP21_weights.18-0.0010.keras', 'PRSA_data_vola28_MLP21_weights.73-0.0007.keras', 'PRSA_data_vola28_MLP7_weights.20-0.0011.keras', 'PRSA_data_price_LSTM21_weights.13-42943.1719.keras', 'PRSA_data_vola28_MLP28_weights.36-0.0007.keras', 'PRSA_data_vola7_MLP14_weights.04-0.0025.keras', 'PRSA_data_at_MLP14_weights.02-0.2715.keras', 'PRSA_data_price_LSTM14_weights.05-51404.8945.keras', 'PRSA_data_price_LSTM28_weights.19-42725.4297.keras', 'PRSA_data_price_MLP28_weights.01-1561.6656.keras', 'PRSA_data_vola21_MLP7_weights.14-0.0011.keras', 'PRSA_data_vola7_MLP7_weights.23-0.0033.keras', 'PRSA_data_vola14_MLP28_weights.01-0.0026.keras', 'PRSA_data_at_MLP14_weights.29-0.0323.keras', 'PRSA_data_vola7_MLP21_weights.01-0.0033.keras', 'PRSA_data_vola21_LSTM_weights.19-0.0014.keras', 'PRSA_data_price_MLP7_weights.03-5953.8076.keras', 'PRSA_data_price_MLP21_weights.42-948.6628.keras', 'PRSA_data_vola21_LSTM14_weights.07-0.0008.keras', 'PRSA_data_vola14_LSTM28_weights.05-0.0018.keras', 'PRSA_data_vola7_LSTM28_weights.13-0.0037.keras', 'PRSA_data_vola21_MLP21_weights.01-0.0016.keras', 'PRSA_data_vola14_LSTM21_weights.08-0.0011.keras', 'PRSA_data_price_LSTM21_weights.06-43011.9570.keras', 'PRSA_data_vola7_MLP14_weights.05-0.0019.keras', 'PRSA_data_vola7_MLP28_weights.16-0.0033.keras', 
'PRSA_data_vola21_LSTM28_weights.16-0.0008.keras', 'PRSA_data_vola28_MLP14_weights.01-0.0031.keras', 'PRSA_data_vola14_MLP28_weights.11-0.0015.keras', 'PRSA_data_price_LSTM14_weights.03-51425.0000.keras', 'PRSA_data_vola7_LSTM28_weights.04-0.0039.keras', 'PRSA_data_vola28_LSTM7_weights.13-0.0011.keras', 'PRSA_data_price_LSTM14_weights.01-51445.4883.keras', 'PRSA_data_vola28_MLP28_weights.22-0.0006.keras', 'PRSA_data_price_LSTM_weights.09-67115.3359.keras', 'PRSA_data_vola28_LSTM7_weights.14-0.0011.keras', 'PRSA_data_vola7_LSTM14_weights.04-0.0050.keras', 'PRSA_data_vola21_MLP28_weights.17-0.0010.keras', 'PRSA_data_vola28_LSTM21_weights.09-0.0010.keras', 'PRSA_data_vola21_MLP14_weights.13-0.0010.keras', 'PRSA_data_vola14_MLP14_weights.01-0.0013.keras', 'PRSA_data_vola21_MLP28_weights.50-0.0009.keras', 'PRSA_data_at_LSTM_weights.19-0.2184.keras', 'PRSA_data_vola14_LSTM_weights.02-0.0030.keras', 'PRSA_data_vola7_MLP28_weights.39-0.0030.keras', 'PRSA_data_price_LSTM21_weights.04-43031.9414.keras', 'PRSA_data_price_LSTM_weights.15-67056.2109.keras', 'PRSA_data_at_MLP7_weights.20-1.6814.keras', 'PRSA_data_vola28_LSTM7_weights.18-0.0010.keras', 'PRSA_data_at_MLP7_weights.95-0.2130.keras', 'PRSA_data_vola14_LSTM28_weights.03-0.0021.keras', 'PRSA_data_at_LSTM_weights.01-7.5653.keras', 'PRSA_data_at_MLP7_weights.05-7.2110.keras', 'PRSA_data_vola21_LSTM28_weights.20-0.0008.keras', 'PRSA_data_at_LSTM_weights.07-0.8569.keras', 'PRSA_data_vola14_LSTM28_weights.12-0.0014.keras', 'PRSA_data_price_LSTM21_weights.16-42913.7969.keras', 'PRSA_data_at_MLP28_weights.04-1.3772.keras', 'PRSA_data_price_LSTM_weights.08-67125.3047.keras', 'PRSA_data_price_LSTM14_weights.07-51384.8633.keras', 'PRSA_data_price_LSTM_weights.04-67165.6016.keras', 'PRSA_data_vola14_LSTM28_weights.15-0.0014.keras', 'PRSA_data_at_MLP14_weights.01-0.4195.keras', 'PRSA_data_price_LSTM28_weights.09-42822.7578.keras', 'PRSA_data_price_LSTM28_weights.03-42882.1953.keras', 'PRSA_data_at_LSTM14_weights.05-0.8408.keras', 
'PRSA_data_vola7_MLP28_weights.08-0.0030.keras', 'PRSA_data_price_MLP14_weights.07-3004.8474.keras', 'PRSA_data_price_LSTM21_weights.15-42923.6367.keras', 'PRSA_data_vola7_MLP21_weights.81-0.0025.keras', 'PRSA_data_price_MLP21_weights.86-819.7106.keras', 'PRSA_data_vola28_MLP7_weights.10-0.0010.keras', 'PRSA_data_vola21_MLP28_weights.05-0.0012.keras', 'PRSA_data_vola21_MLP14_weights.20-0.0007.keras', 'PRSA_data_vola14_MLP28_weights.42-0.0011.keras', 'PRSA_data_price_LSTM_weights.12-67085.7578.keras', 'PRSA_data_vola28_MLP21_weights.13-0.0008.keras', 'PRSA_data_vola14_MLP28_weights.08-0.0016.keras', 'PRSA_data_at_MLP7_weights.86-0.0883.keras', 'PRSA_data_vola28_LSTM14_weights.01-0.0020.keras', 'PRSA_data_vola21_MLP21_weights.71-0.0010.keras', 'PRSA_data_at_MLP28_weights.09-0.1537.keras', 'PRSA_data_vola7_MLP28_weights.48-0.0030.keras', 'PRSA_data_at_MLP28_weights.12-0.0354.keras', 'PRSA_data_at_LSTM14_weights.04-0.9732.keras', 'PRSA_data_at_LSTM14_weights.15-0.2154.keras', 'PRSA_data_vola14_MLP28_weights.21-0.0013.keras', 'PRSA_data_vola21_MLP14_weights.25-0.0009.keras', 'PRSA_data_vola14_LSTM14_weights.01-0.0027.keras', 'PRSA_data_vola21_LSTM28_weights.01-0.0039.keras', 'PRSA_data_price_MLP14_weights.02-8059.2510.keras', 'PRSA_data_vola21_MLP7_weights.13-0.0013.keras', 'PRSA_data_vola28_MLP7_weights.04-0.0016.keras', 'PRSA_data_vola21_LSTM21_weights.02-0.0043.keras', 'PRSA_data_price_MLP14_weights.78-683.9230.keras', 'PRSA_data_price_MLP21_weights.19-1002.5925.keras', 'PRSA_data_vola28_MLP21_weights.92-0.0007.keras', 'PRSA_data_price_LSTM28_weights.14-42774.2500.keras', 'PRSA_data_vola21_MLP21_weights.06-0.0010.keras', 'PRSA_data_vola14_LSTM14_weights.03-0.0010.keras', 'PRSA_data_price_LSTM_weights.16-67046.2422.keras', 'PRSA_data_vola7_LSTM28_weights.07-0.0037.keras', 'PRSA_data_vola28_LSTM7_weights.01-0.0017.keras', 'PRSA_data_vola28_LSTM14_weights.02-0.0008.keras', 'PRSA_data_vola7_MLP14_weights.05-0.0022.keras', 'PRSA_data_at_MLP21_weights.03-0.2039.keras', 
'PRSA_data_vola14_LSTM28_weights.02-0.0023.keras', 'PRSA_data_at_MLP7_weights.03-13.2212.keras', 'PRSA_data_vola7_LSTM14_weights.04-0.0037.keras', 'PRSA_data_vola14_MLP21_weights.69-0.0010.keras', 'PRSA_data_vola21_LSTM7_weights.18-0.0013.keras', 'PRSA_data_vola7_MLP21_weights.49-0.0025.keras', 'PRSA_data_vola28_MLP28_weights.31-0.0006.keras', 'PRSA_data_price_MLP21_weights.77-841.5739.keras', 'PRSA_data_vola28_MLP14_weights.35-0.0006.keras', 'PRSA_data_vola7_MLP28_weights.26-0.0031.keras', 'PRSA_data_vola7_LSTM14_weights.01-0.0065.keras', 'PRSA_data_price_LSTM14_weights.02-51435.1133.keras', 'PRSA_data_price_LSTM28_weights.20-42714.7695.keras', 'PRSA_data_vola28_LSTM28_weights.16-0.0006.keras', 'PRSA_data_vola14_LSTM28_weights.06-0.0016.keras', 'PRSA_data_vola21_MLP21_weights.13-0.0010.keras', 'PRSA_data_vola28_MLP14_weights.32-0.0006.keras', 'PRSA_data_vola7_MLP28_weights.01-0.0043.keras', 'PRSA_data_vola21_MLP28_weights.08-0.0011.keras', 'PRSA_data_vola21_LSTM28_weights.10-0.0011.keras', 'PRSA_data_vola21_LSTM7_weights.14-0.0012.keras', 'PRSA_data_vola7_LSTM28_weights.01-0.0049.keras', 'PRSA_data_price_LSTM28_weights.20-42715.6250.keras', 'PRSA_data_vola21_LSTM7_weights.13-0.0012.keras', 'PRSA_data_price_LSTM_weights.05-67155.5078.keras', 'PRSA_data_price_MLP14_weights.14-1073.6493.keras', 'PRSA_data_vola14_LSTM_weights.03-0.0029.keras', 'PRSA_data_vola14_MLP7_weights.07-0.0021.keras', 'PRSA_data_at_MLP28_weights.03-0.0637.keras', 'PRSA_data_vola21_MLP28_weights.03-0.0012.keras', 'PRSA_data_at_MLP28_weights.05-0.1823.keras', 'PRSA_data_at_MLP21_weights.01-0.9762.keras', 'PRSA_data_vola7_LSTM28_weights.15-0.0035.keras', 'PRSA_data_price_LSTM21_weights.08-42992.0703.keras', 'PRSA_data_vola21_MLP21_weights.22-0.0010.keras', 'PRSA_data_vola28_LSTM28_weights.20-0.0006.keras', 'PRSA_data_at_MLP21_weights.01-4.3241.keras', 'PRSA_data_at_LSTM_weights.06-0.8671.keras', 'PRSA_data_price_MLP7_weights.37-1654.4709.keras', 'PRSA_data_at_LSTM_weights.20-0.4626.keras', 
'PRSA_data_vola21_MLP28_weights.27-0.0009.keras', 'PRSA_data_at_LSTM_weights.19-0.4729.keras', 'PRSA_data_at_MLP7_weights.24-1.4718.keras', 'PRSA_data_at_MLP7_weights.59-0.4636.keras', 'PRSA_data_vola14_LSTM14_weights.06-0.0005.keras', 'PRSA_data_at_MLP7_weights.14-2.2322.keras', 'PRSA_data_vola7_LSTM14_weights.02-0.0196.keras', 'PRSA_data_at_LSTM14_weights.19-0.3356.keras', 'PRSA_data_price_LSTM28_weights.18-42735.2305.keras', 'PRSA_data_price_LSTM14_weights.12-51335.6523.keras', 'PRSA_data_vola21_LSTM_weights.06-0.0017.keras', 'PRSA_data_price_LSTM28_weights.05-42862.2383.keras', 'PRSA_data_price_MLP7_weights.10-1248.9592.keras', 'PRSA_data_vola21_MLP28_weights.35-0.0009.keras', 'PRSA_data_vola28_LSTM21_weights.03-0.0013.keras', 'PRSA_data_price_MLP14_weights.04-5028.7607.keras', 'PRSA_data_price_LSTM14_weights.08-51374.8945.keras', 'PRSA_data_vola28_MLP14_weights.48-0.0007.keras']
# Predictions using the best checkpointed model loaded above.
# (Indentation reconstructed: the exported cell had lost its block structure.)
if best_model14_at is not None:
    train_preds_at_MLP14 = best_model14_at.predict(X_train_at14)
    val_preds_at_MLP14 = best_model14_at.predict(X_val_at14)
    test_preds_at_MLP14 = best_model14_at.predict(X_test_at14)

    # Drop the trailing singleton dimension returned by Keras predict().
    train_preds_at_MLP14 = np.squeeze(train_preds_at_MLP14)
    val_preds_at_MLP14 = np.squeeze(val_preds_at_MLP14)
    test_preds_at_MLP14 = np.squeeze(test_preds_at_MLP14)

    # Print the predictions for each split.
    print("Predicciones de Entrenamiento:", train_preds_at_MLP14)
    print("Predicciones de validación:", val_preds_at_MLP14)
    print("Predicciones de prueba:", test_preds_at_MLP14)
else:
    print("No se puede realizar predicciones porque el mejor modelo no se ha cargado.")
155/155 [==============================] - 0s 222us/step
1/1 [==============================] - 0s 9ms/step
1/1 [==============================] - 0s 9ms/step
Predicciones de Entrenamiento: [ 0.18476558 0.18476558 0.18476558 ... 23.28196 23.292923
23.314037 ]
Predicciones de validación: [23.326107 23.338116 23.352257 23.370762 23.38123 23.397968 23.409077
23.408966 23.431313 23.428944 23.442348 23.44526 23.438065 23.446583]
Predicciones de prueba: [23.44529 23.462875 23.47807 23.515312 23.542103 23.56216 23.566898
23.576416 23.604042 23.6175 23.640625 23.641129 23.664028 23.704872]
A continuación se indexan los parámetros sobre la función data_plot() para un horizonte de 14 días (\(\tau = 14\)).
# Split the 14-day cumulative-return series A_t into train/val/test series for plotting.
# NOTE(review): data_plot is defined earlier in the notebook — confirm the split sizes there.
data_train_plot_at14, data_val_plot_at14, data_test_plot_at14 = data_plot(df_1_st['A_t'], 14)
Datos de entrenamiento:
4998 0.0000
4997 0.0000
4996 0.0000
4995 0.0000
4994 0.0000
...
32 23.4245
31 23.4141
30 23.4028
29 23.4192
28 23.4221
Name: A_t, Length: 4971, dtype: float64
Datos de validación:
27 23.4757
26 23.5227
25 23.6176
24 23.5968
23 23.6169
22 23.6104
21 23.6288
20 23.7101
19 23.6445
18 23.6804
17 23.6921
16 23.7118
15 23.7147
14 23.7234
Name: A_t, dtype: float64
Datos de prueba:
13 23.7689
12 23.7602
11 23.7825
10 23.7595
9 23.7326
8 23.6728
7 23.7199
6 23.7083
5 23.6263
4 23.7198
3 23.6852
2 23.6589
1 23.6629
0 23.7125
Name: A_t, dtype: float64
# Plot the last 100 training points plus the validation/test series against the MLP predictions.
plot_model(data_train_plot_at14[-100:], data_val_plot_at14, data_test_plot_at14, val_preds_at_MLP14, test_preds_at_MLP14, "Predicciones usando Perceptrón Multicapa (MLP) para un horizonte de 14 días")
A continuación realizamos un análisis sobre los residuales y el rendimiento de nuestro modelo con relación a nuestro conjunto de validación (rendimiento) y de prueba (rendimiento y residuales).
Conjunto de datos de Entrenamiento
A continuación se realiza el análisis de los residuales para el conjunto de entrenamiento.
# Residual diagnostics on the training split: returns the Ljung-Box (autocorrelation)
# and Jarque-Bera (normality) p-values and draws the diagnostic plots.
ljung_box_pval_MLP_train14_at, jarque_bera_pval_MLP_train14_at = diagnostic_plots(y_train_at14, train_preds_at_MLP14)
Ljung-Box LB Statistic: 2856.986047
Ljung-Box p-value: 0.000000
Se rechaza H0: hay autocorrelación en los residuales.
Jarque-Bera p-value: 0.000000
Se rechaza H0: los residuales no siguen una distribución normal.
# Compute the fit metrics (SSE, MAPE, MAD, MSD, R2) for the training split,
# relabel the single row, and append the residual-diagnostic p-values.
metrica_at_MLP_train14 = metricas(y_train_at14, train_preds_at_MLP14)
row_label = 'MLP Entrenamiento Retorno Acumulado τ = 14'
metrica_at_MLP_train14.index = metrica_at_MLP_train14.index.map({0: row_label})
for col, pval in (('Ljung-Box p-value', ljung_box_pval_MLP_train14_at),
                  ('Jarque-Bera p-value', jarque_bera_pval_MLP_train14_at)):
    metrica_at_MLP_train14[col] = pd.Series([pval], index=metrica_at_MLP_train14.index)
metrica_at_MLP_train14
| SSE | MAPE | MAD | MSD | R2 | Ljung-Box p-value | Jarque-Bera p-value | |
|---|---|---|---|---|---|---|---|
| MLP Entrenamiento Retorno Acumulado τ = 14 | 94.1028 | 0.79% | 0.08 | 0.02 | 99.95% | 0.0 | 0.0 |
Conjunto de datos de Prueba:
# Residual diagnostics on the test split (Ljung-Box and Jarque-Bera p-values).
ljung_box_pvalMLP14_at, jarque_bera_pvalMLP14_at = evaluate_residuals(data_test_plot_at14, test_preds_at_MLP14)
Se rechaza H0: hay autocorrelación en los residuales.
No se rechaza H0: los residuales siguen una distribución normal.
# Fit metrics (SSE, MAPE, MAD, MSD, R2) for the test split, plus the
# residual-diagnostic p-values computed above.
metrica_MLP14_test_at = metricas(y_test_at14, test_preds_at_MLP14)
metrica_MLP14_test_at.index = metrica_MLP14_test_at.index.map({0: 'MLP Prueba Retorno Acumulado τ = 14'})
# BUG FIX: the original referenced `ljung_box_pvalMLP14` / `jarque_bera_pvalMLP14`
# (names without the `_at` suffix), so the table did not report the p-values
# computed for THIS model's residuals. Use the `_at` variables instead.
metrica_MLP14_test_at['Ljung-Box p-value'] = pd.Series([ljung_box_pvalMLP14_at], index=metrica_MLP14_test_at.index)
metrica_MLP14_test_at['Jarque-Bera p-value'] = pd.Series([jarque_bera_pvalMLP14_at], index=metrica_MLP14_test_at.index)
metrica_MLP14_test_at
| SSE | MAPE | MAD | MSD | R2 | Ljung-Box p-value | Jarque-Bera p-value | |
|---|---|---|---|---|---|---|---|
| MLP Prueba Retorno Acumulado τ = 14 | 0.0764 | 0.28% | 0.07 | 0.01 | -6.65% | 0.0022 | 0.5157 |
Curva Runs vs Error/Score :
# Plot the runs-vs-error/score curve from the stored training history.
# NOTE(review): plot_best_model_validation_loss is defined earlier in the notebook.
plot_best_model_validation_loss(history_at_MPL14)
Boxplot Errores Conjunto de Entrenamiento, Validación y Prueba :
# Boxplots of the prediction errors for the train / validation / test splits.
errores_plots(y_train_at14, train_preds_at_MLP14, y_val_at14, val_preds_at_MLP14, y_test_at14, test_preds_at_MLP14)
De acuerdo a los resultados obtenidos del gráfico de Run vs Error/Score y al compararlo con el gráfico boxplot de los residuales se observa lo siguiente:
Conjunto de Entrenamiento: La media de los residuales se encuentra alejada considerablemente del error/score correspondiente al val_loss de la época del mejor modelo.
Conjunto de Validación: La media de los residuales se encuentra alejada considerablemente del error/score correspondiente al val_loss de la época del mejor modelo.
Conjunto de Prueba: La media de los residuales se encuentra alejada considerablemente del error/score correspondiente al val_loss de la época del mejor modelo. De igual forma se observa una alta variabilidad en los residuales.
Lo anterior es coherente con las métricas obtenidas para el modelo implementado.
Horizonte de 21 días (\(\tau=21\))#
A continuación, se lleva a cabo la búsqueda de los hiperparámetros que optimizan nuestro modelo utilizando Keras y GridSearch.
# Silence TensorFlow C++ info/warning logs; only errors are shown.
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

def create_mlp_model(activation='tanh', learning_rate=0.001, input_dim=21):
    """Build and compile the MLP used for the 21-day horizon.

    Architecture: Input(input_dim) -> Dense(32) -> Dense(16) -> Dense(16)
    -> Dropout(0.2) -> Dense(1, linear), compiled with MAE loss and the
    legacy Adam optimizer.

    Parameters
    ----------
    activation : str
        Activation function for the hidden layers.
    learning_rate : float
        Step size for the Adam optimizer.
    input_dim : int
        Number of input features; defaults to the 21-day window so
        existing callers are unaffected.

    Returns
    -------
    keras.Model
        The compiled model, ready for fitting.
    """
    input_layer = Input(shape=(input_dim,), dtype='float32')
    dense1 = Dense(32, activation=activation)(input_layer)
    dense2 = Dense(16, activation=activation)(dense1)
    dense3 = Dense(16, activation=activation)(dense2)
    dropout_layer = Dropout(0.2)(dense3)  # regularization before the output
    output_layer = Dense(1, activation='linear')(dropout_layer)
    model = Model(inputs=input_layer, outputs=output_layer)
    model.compile(loss='mean_absolute_error',
                  optimizer=tf.keras.optimizers.legacy.Adam(learning_rate=learning_rate))
    return model
# Grid-search setup: wrap the Keras builder in a scikit-learn-compatible regressor.
model = KerasRegressor(build_fn=create_mlp_model, epochs=20, batch_size=16, verbose=0)
# Hyperparameter grid; the wider sweeps tried earlier are noted in the comments.
param_grid = {
    'activation': ['relu'],       # full sweep: ['relu', 'tanh', 'sigmoid']
    'epochs': [20],               # full sweep: [20, 50, 100, 200, 300]
    'learning_rate': [0.001],     # full sweep: [0.001, 0.01, 0.1, 0.2, 0.3]
}
grid = GridSearchCV(estimator=model, param_grid=param_grid, n_jobs=-1,
                    cv=KFold(n_splits=5, shuffle=True), verbose=2)
# Fit the grid search on the 21-day training window.
grid_result = grid.fit(X_train_at21, y_train_at21)
# Report the winning hyperparameters and the cross-validation score.
best = grid_result.best_params_
print(f"Mejor función de activación: {best['activation']}")
print(f"Mejor número de epocas: {best['epochs']}")
print(f"Mejor Longitud de paso μ (Learning Rate): {best['learning_rate']}")
print(f"Mejor Puntuación: {grid_result.best_score_}")
Fitting 5 folds for each of 1 candidates, totalling 5 fits
[CV] END ....activation=relu, epochs=20, learning_rate=0.001; total time= 3.5s
[CV] END ....activation=relu, epochs=20, learning_rate=0.001; total time= 3.5s
[CV] END ....activation=relu, epochs=20, learning_rate=0.001; total time= 3.6s
[CV] END ....activation=relu, epochs=20, learning_rate=0.001; total time= 3.6s
[CV] END ....activation=relu, epochs=20, learning_rate=0.001; total time= 3.7s
Mejor función de activación: relu
Mejor número de epocas: 20
Mejor Longitud de paso μ (Learning Rate): 0.001
Mejor Puntuación: -0.8212478518486023
A continuación, configuramos la red MLP empleando la API funcional de Keras. Con este método, una capa puede ser designada como la entrada para la siguiente capa en el momento de su definición.
En este contexto, Input es una función utilizada para establecer una capa de entrada en un modelo de red neuronal. La propiedad shape=(21,) define la estructura de los datos de entrada, lo que indica que estos tendrán 21 dimensiones. Por otro lado, dtype='float32' especifica que los elementos de esta capa serán números de punto flotante de 32 bits.
A continuación se define la función build_models para la generación de modelos de acuerdo a los parámetros indexados a través de la lista.
La indexación de parámetros de la función build_models_mlp se realiza con base a lo indicado en el numeral 3 del parcial práctico.
# Model grid for the 21-day horizon: one MLP per (neurons, dropout) combination.
input_shape21 = 21
neurons_list = [10 ** k for k in range(1, 5)]     # [10, 100, 1000, 10000]
dropout_rates = [j / 5 for j in range(1, 5)]      # [0.2, 0.4, 0.6, 0.8]
models_MLP21_at = build_models_mlp(input_shape21, neurons_list, dropout_rates, 'relu')
Modelo con 10 neuronas y dropout 0.2:
Model: "model_171"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_172 (InputLayer) [(None, 21)] 0
dense_480 (Dense) (None, 32) 704
dense_481 (Dense) (None, 16) 528
dense_482 (Dense) (None, 16) 272
dropout_171 (Dropout) (None, 16) 0
dense_483 (Dense) (None, 1) 17
=================================================================
Total params: 1,521
Trainable params: 1,521
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10 neuronas y dropout 0.4:
Model: "model_172"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_173 (InputLayer) [(None, 21)] 0
dense_484 (Dense) (None, 32) 704
dense_485 (Dense) (None, 16) 528
dense_486 (Dense) (None, 16) 272
dropout_172 (Dropout) (None, 16) 0
dense_487 (Dense) (None, 1) 17
=================================================================
Total params: 1,521
Trainable params: 1,521
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10 neuronas y dropout 0.6:
Model: "model_173"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_174 (InputLayer) [(None, 21)] 0
dense_488 (Dense) (None, 32) 704
dense_489 (Dense) (None, 16) 528
dense_490 (Dense) (None, 16) 272
dropout_173 (Dropout) (None, 16) 0
dense_491 (Dense) (None, 1) 17
=================================================================
Total params: 1,521
Trainable params: 1,521
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10 neuronas y dropout 0.8:
Model: "model_174"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_175 (InputLayer) [(None, 21)] 0
dense_492 (Dense) (None, 32) 704
dense_493 (Dense) (None, 16) 528
dense_494 (Dense) (None, 16) 272
dropout_174 (Dropout) (None, 16) 0
dense_495 (Dense) (None, 1) 17
=================================================================
Total params: 1,521
Trainable params: 1,521
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 100 neuronas y dropout 0.2:
Model: "model_175"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_176 (InputLayer) [(None, 21)] 0
dense_496 (Dense) (None, 32) 704
dense_497 (Dense) (None, 16) 528
dense_498 (Dense) (None, 16) 272
dropout_175 (Dropout) (None, 16) 0
dense_499 (Dense) (None, 1) 17
=================================================================
Total params: 1,521
Trainable params: 1,521
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 100 neuronas y dropout 0.4:
Model: "model_176"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_177 (InputLayer) [(None, 21)] 0
dense_500 (Dense) (None, 32) 704
dense_501 (Dense) (None, 16) 528
dense_502 (Dense) (None, 16) 272
dropout_176 (Dropout) (None, 16) 0
dense_503 (Dense) (None, 1) 17
=================================================================
Total params: 1,521
Trainable params: 1,521
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 100 neuronas y dropout 0.6:
Model: "model_177"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_178 (InputLayer) [(None, 21)] 0
dense_504 (Dense) (None, 32) 704
dense_505 (Dense) (None, 16) 528
dense_506 (Dense) (None, 16) 272
dropout_177 (Dropout) (None, 16) 0
dense_507 (Dense) (None, 1) 17
=================================================================
Total params: 1,521
Trainable params: 1,521
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 100 neuronas y dropout 0.8:
Model: "model_178"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_179 (InputLayer) [(None, 21)] 0
dense_508 (Dense) (None, 32) 704
dense_509 (Dense) (None, 16) 528
dense_510 (Dense) (None, 16) 272
dropout_178 (Dropout) (None, 16) 0
dense_511 (Dense) (None, 1) 17
=================================================================
Total params: 1,521
Trainable params: 1,521
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 1000 neuronas y dropout 0.2:
Model: "model_179"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_180 (InputLayer) [(None, 21)] 0
dense_512 (Dense) (None, 32) 704
dense_513 (Dense) (None, 16) 528
dense_514 (Dense) (None, 16) 272
dropout_179 (Dropout) (None, 16) 0
dense_515 (Dense) (None, 1) 17
=================================================================
Total params: 1,521
Trainable params: 1,521
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 1000 neuronas y dropout 0.4:
Model: "model_180"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_181 (InputLayer) [(None, 21)] 0
dense_516 (Dense) (None, 32) 704
dense_517 (Dense) (None, 16) 528
dense_518 (Dense) (None, 16) 272
dropout_180 (Dropout) (None, 16) 0
dense_519 (Dense) (None, 1) 17
=================================================================
Total params: 1,521
Trainable params: 1,521
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 1000 neuronas y dropout 0.6:
Model: "model_181"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_182 (InputLayer) [(None, 21)] 0
dense_520 (Dense) (None, 32) 704
dense_521 (Dense) (None, 16) 528
dense_522 (Dense) (None, 16) 272
dropout_181 (Dropout) (None, 16) 0
dense_523 (Dense) (None, 1) 17
=================================================================
Total params: 1,521
Trainable params: 1,521
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 1000 neuronas y dropout 0.8:
Model: "model_182"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_183 (InputLayer) [(None, 21)] 0
dense_524 (Dense) (None, 32) 704
dense_525 (Dense) (None, 16) 528
dense_526 (Dense) (None, 16) 272
dropout_182 (Dropout) (None, 16) 0
dense_527 (Dense) (None, 1) 17
=================================================================
Total params: 1,521
Trainable params: 1,521
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10000 neuronas y dropout 0.2:
Model: "model_183"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_184 (InputLayer) [(None, 21)] 0
dense_528 (Dense) (None, 32) 704
dense_529 (Dense) (None, 16) 528
dense_530 (Dense) (None, 16) 272
dropout_183 (Dropout) (None, 16) 0
dense_531 (Dense) (None, 1) 17
=================================================================
Total params: 1,521
Trainable params: 1,521
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10000 neuronas y dropout 0.4:
Model: "model_184"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_185 (InputLayer) [(None, 21)] 0
dense_532 (Dense) (None, 32) 704
dense_533 (Dense) (None, 16) 528
dense_534 (Dense) (None, 16) 272
dropout_184 (Dropout) (None, 16) 0
dense_535 (Dense) (None, 1) 17
=================================================================
Total params: 1,521
Trainable params: 1,521
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10000 neuronas y dropout 0.6:
Model: "model_185"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_186 (InputLayer) [(None, 21)] 0
dense_536 (Dense) (None, 32) 704
dense_537 (Dense) (None, 16) 528
dense_538 (Dense) (None, 16) 272
dropout_185 (Dropout) (None, 16) 0
dense_539 (Dense) (None, 1) 17
=================================================================
Total params: 1,521
Trainable params: 1,521
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10000 neuronas y dropout 0.8:
Model: "model_186"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_187 (InputLayer) [(None, 21)] 0
dense_540 (Dense) (None, 32) 704
dense_541 (Dense) (None, 16) 528
dense_542 (Dense) (None, 16) 272
dropout_186 (Dropout) (None, 16) 0
dense_543 (Dense) (None, 1) 17
=================================================================
Total params: 1,521
Trainable params: 1,521
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Para entrenar el modelo, se utiliza la función fit() en el objeto del modelo, proporcionándole como argumentos X_train y y_train. El proceso de entrenamiento se lleva a cabo a lo largo de un número específico de épocas. Además, el parámetro batch_size especifica cuántas muestras del conjunto de entrenamiento se usarán en cada iteración de retropropagación (backpropagation).
El conjunto de datos de validación se utiliza para evaluar el rendimiento del modelo al finalizar cada época. Se emplea un objeto ModelCheckpoint que monitorea la función de pérdida en el conjunto de validación y almacena el modelo correspondiente a la época en la que esta función alcanza su valor más bajo.
Se define val_loss como el valor de la función de coste para los datos de validación cruzada y loss como el valor de la función de coste para los datos de entrenamiento. period=1 indica que el modelo se evaluará y potencialmente se guardará después de cada época.
# Checkpointing for the 21-day MLP: persist the model whenever validation loss improves.
from tensorflow.keras.callbacks import ModelCheckpoint
# Filename template encodes the epoch number and the validation loss at save time.
save_weights = os.path.join('keras_models', 'PRSA_data_at_MLP21_weights.{epoch:02d}-{val_loss:.4f}.keras')
# save_best_only=True with mode='min' keeps only checkpoints that lower the monitored
# val_loss; save_freq='epoch' evaluates the criterion at the end of every epoch.
save_best21_at = ModelCheckpoint(save_weights, monitor='val_loss', verbose=2,
save_best_only=True, save_weights_only=False, mode='min', save_freq='epoch');
A continuación se itera sobre cada uno de los modelos del objeto models_MLP21.
import os
from joblib import dump, load

# Train (or reload) every candidate MLP for the 21-day horizon.
# For each model in models_MLP21_at we either load a previously saved training
# history from disk, or fit the model and persist its history, so re-running the
# notebook does not repeat the expensive training step.
history_at_MPL21 = []
for i, model in enumerate(models_MLP21_at):
    filename = f'history_at_MPL21_model_{i}.joblib'
    if os.path.exists(filename):
        # Cached run: reuse the stored history dict.
        model_history = load(filename)
        # Fix: interpolate the actual filename (the message previously printed a
        # literal placeholder instead of the file that was loaded).
        print(f"El archivo '{filename}' ya existe. Se ha cargado el historial del entrenamiento.")
    else:
        # Fresh run: fit with best-model checkpointing, then persist the history.
        model_history = model.fit(x=X_train_at21, y=y_train_at21, batch_size=16, epochs=20,
                                  verbose=2, callbacks=[save_best21_at], validation_data=(X_val_at21, y_val_at21),
                                  shuffle=True)
        dump(model_history.history, filename)
        print(f"El entrenamiento del modelo {i + 1} se ha completado y el historial ha sido guardado en '{filename}'.")
    # A loaded history is already a dict; a fresh Keras History object exposes .history.
    history_at_MPL21.append(model_history if isinstance(model_history, dict) else model_history.history)
El archivo 'history_at_MPL21_model_0.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_at_MPL21_model_1.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_at_MPL21_model_2.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_at_MPL21_model_3.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_at_MPL21_model_4.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_at_MPL21_model_5.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_at_MPL21_model_6.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_at_MPL21_model_7.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_at_MPL21_model_8.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_at_MPL21_model_9.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_at_MPL21_model_10.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_at_MPL21_model_11.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_at_MPL21_model_12.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_at_MPL21_model_13.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_at_MPL21_model_14.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_at_MPL21_model_15.joblib' ya existe. Se ha cargado el historial del entrenamiento.
En este caso, el parámetro verbose=2 indica que se mostrará una línea por cada época durante el entrenamiento. Los valores posibles son: 0 para no mostrar información, 1 para visualizar una barra de progreso y 2 para ver un registro por época.
Además, las muestras se mezclan de manera aleatoria antes de cada época, gracias a la opción shuffle=True.
Una vez completado el entrenamiento, se realizan predicciones sobre el precio del Bitcoin utilizando los mejores modelos guardados. Asimismo, se evalúa la calidad del ajuste mediante el uso de métricas de medición como SSE, MAPE, MAD, MSD y R2.
import os
import re
from tensorflow.keras.models import load_model
import numpy as np

# Scan the checkpoint directory for saved 21-day MLP weights and load the
# checkpoint whose filename encodes the lowest validation loss.
model_dir = 'keras_models'
files = os.listdir(model_dir)
pattern = r"PRSA_data_at_MLP21_weights\.(\d+)-([\d\.]+)\.keras"
best_val_loss = float('inf')
best_model_file = None
best_model21_at = None

# Collect (val_loss, filename) pairs for every file matching the template;
# group(2) is the val_loss embedded in the name by ModelCheckpoint.
candidates = []
for file in files:
    match = re.match(pattern, file)
    if match is not None:
        candidates.append((float(match.group(2)), file))

if candidates:
    # min() returns the first minimal element, matching the original strict-< scan.
    best_val_loss, best_model_file = min(candidates, key=lambda pair: pair[0])

# Load the winning checkpoint, if any was found.
if best_model_file:
    best_model_path = os.path.join(model_dir, best_model_file)
    print(f"Cargando el mejor modelo: {best_model_file} con val_loss: {best_val_loss}")
    best_model21_at = load_model(best_model_path)
    if best_model21_at is None:
        print("Error: No se pudo cargar el mejor modelo.")
else:
    print("No se encontraron archivos de modelos que coincidan con el patrón.")
Cargando el mejor modelo: PRSA_data_at_MLP21_weights.11-0.0326.keras con val_loss: 0.0326
A continuación estimamos las predicciones del modelo MLP para la época en donde la función de pérdida alcanza su valor más bajo.
# Predictions with the best checkpointed model (guard against a failed load).
if best_model21_at is None:
    print("No se puede realizar predicciones porque el mejor modelo no se ha cargado.")
else:
    # Predict on each split, then drop the trailing singleton dimension.
    train_preds_at_MLP21 = np.squeeze(best_model21_at.predict(X_train_at21))
    val_preds_at_MLP21 = np.squeeze(best_model21_at.predict(X_val_at21))
    test_preds_at_MLP21 = np.squeeze(best_model21_at.predict(X_test_at21))
    # Echo the predictions for inspection.
    print('Predicciones de entrenamiento', train_preds_at_MLP21)
    print("Predicciones de validación:", val_preds_at_MLP21)
    print("Predicciones de prueba:", test_preds_at_MLP21)
154/154 [==============================] - 0s 199us/step
1/1 [==============================] - 0s 8ms/step
1/1 [==============================] - 0s 9ms/step
Predicciones de entrenamiento [ 0.8859012 0.8859012 0.8859012 ... 23.264261 23.252672 23.258429 ]
Predicciones de validación: [23.258463 23.243237 23.222586 23.229269 23.21482 23.21091 23.215971
23.218592 23.23078 23.227217 23.23064 23.226778 23.230736 23.232473
23.241411 23.244938 23.254019 23.252293 23.263943 23.268297 23.271044]
Predicciones de prueba: [23.279123 23.286695 23.298567 23.317417 23.336485 23.34952 23.367588
23.384176 23.397285 23.408937 23.416931 23.422533 23.423899 23.422758
23.425894 23.427721 23.435617 23.453411 23.464386 23.480364 23.496124]
A continuación se indexan los parámetros sobre la función data_plot() para un horizonte de 21 días (\(\tau = 21\)).
# Split the cumulative-return series A_t into train/val/test plotting segments for τ = 21.
data_train_plot_at21, data_val_plot_at21, data_test_plot_at21 = data_plot(df_1_st['A_t'], 21)
Datos de entrenamiento:
4998 0.0000
4997 0.0000
4996 0.0000
4995 0.0000
4994 0.0000
...
46 23.2651
45 23.2866
44 23.3271
43 23.3404
42 23.3513
Name: A_t, Length: 4957, dtype: float64
Datos de validación:
41 23.3858
40 23.3813
39 23.4228
38 23.4251
37 23.4296
36 23.4202
35 23.4294
34 23.4230
33 23.4322
32 23.4245
31 23.4141
30 23.4028
29 23.4192
28 23.4221
27 23.4757
26 23.5227
25 23.6176
24 23.5968
23 23.6169
22 23.6104
21 23.6288
Name: A_t, dtype: float64
Datos de prueba:
20 23.7101
19 23.6445
18 23.6804
17 23.6921
16 23.7118
15 23.7147
14 23.7234
13 23.7689
12 23.7602
11 23.7825
10 23.7595
9 23.7326
8 23.6728
7 23.7199
6 23.7083
5 23.6263
4 23.7198
3 23.6852
2 23.6589
1 23.6629
0 23.7125
Name: A_t, dtype: float64
# Plot the last 100 training points plus validation/test actuals against the MLP predictions.
plot_model(data_train_plot_at21[-100:], data_val_plot_at21, data_test_plot_at21, val_preds_at_MLP21, test_preds_at_MLP21, "Predicciones usando Perceptrón Multicapa (MLP) para un horizonte de 21 días")
A continuación realizamos un análisis sobre los residuales y rendimiento de nuestro modelo con relación a nuestro conjunto de validación(rendimiento) y test (rendimiento y residuales).
Conjunto de datos de Entrenamiento
A continuación se realiza el análisis de los residuales para el conjunto de entrenamiento.
# Residual diagnostics on the training split: Ljung-Box (autocorrelation) and Jarque-Bera (normality) p-values.
ljung_box_pval_MLP_train21_at, jarque_bera_pval_MLP_train21_at = diagnostic_plots(y_train_at21, train_preds_at_MLP21)
Ljung-Box LB Statistic: 4079.943991
Ljung-Box p-value: 0.000000
Se rechaza H0: hay autocorrelación en los residuales.
Jarque-Bera p-value: 0.000000
Se rechaza H0: los residuales no siguen una distribución normal.
# Compute fit metrics for the training split and append the diagnostic p-values.
metrica_at_MLP_train21 = metricas(y_train_at21,train_preds_at_MLP21)
# Relabel the single metrics row with a descriptive model/split name.
metrica_at_MLP_train21.index = metrica_at_MLP_train21.index.map({0: 'MLP Entrenamiento Retorno Acumulado τ = 21'})
metrica_at_MLP_train21['Ljung-Box p-value'] = pd.Series([ljung_box_pval_MLP_train21_at], index=metrica_at_MLP_train21.index)
metrica_at_MLP_train21['Jarque-Bera p-value'] = pd.Series([jarque_bera_pval_MLP_train21_at], index=metrica_at_MLP_train21.index)
metrica_at_MLP_train21
| SSE | MAPE | MAD | MSD | R2 | Ljung-Box p-value | Jarque-Bera p-value | |
|---|---|---|---|---|---|---|---|
| MLP Entrenamiento Retorno Acumulado τ = 21 | 225.1957 | 1.16% | 0.13 | 0.05 | 99.87% | 0.0 | 0.0 |
Conjunto de datos de Prueba:
# Residual diagnostics on the test split (Ljung-Box and Jarque-Bera p-values).
ljung_box_pvalMLP21_at, jarque_bera_pvalMLP21_at = evaluate_residuals(data_test_plot_at21, test_preds_at_MLP21)
Se rechaza H0: hay autocorrelación en los residuales.
No se rechaza H0: los residuales siguen una distribución normal.
# Test-split fit metrics for the τ = 21 MLP, annotated with the residual-diagnostic p-values.
metrica_MLP21_test_at = metricas(y_test_at21,test_preds_at_MLP21)
# Relabel the single metrics row with a descriptive model/split name.
metrica_MLP21_test_at.index = metrica_MLP21_test_at.index.map({0: 'MLP Prueba Retorno Acumulado τ = 21'})
metrica_MLP21_test_at['Ljung-Box p-value'] = pd.Series([ljung_box_pvalMLP21_at], index=metrica_MLP21_test_at.index)
metrica_MLP21_test_at['Jarque-Bera p-value'] = pd.Series([jarque_bera_pvalMLP21_at], index=metrica_MLP21_test_at.index)
metrica_MLP21_test_at
| SSE | MAPE | MAD | MSD | R2 | Ljung-Box p-value | Jarque-Bera p-value | |
|---|---|---|---|---|---|---|---|
| MLP Prueba Retorno Acumulado τ = 21 | 0.1912 | 0.34% | 0.08 | 0.01 | -27.19% | 0.0003 | 0.4259 |
Curva Runs vs Error/Score :
# Plot the runs-vs-error/score curves from the stored τ = 21 training histories.
plot_best_model_validation_loss(history_at_MPL21)
Boxplot Errores Conjunto de Entrenamiento, Validación y Prueba :
# Boxplots of the residuals for the train, validation and test splits (τ = 21).
errores_plots(y_train_at21, train_preds_at_MLP21, y_val_at21, val_preds_at_MLP21, y_test_at21, test_preds_at_MLP21)
De acuerdo a los resultados obtenidos del gráfico de Run vs Error/Score y al compararlo con el gráfico boxplot de los residuales se observa lo siguiente:
Conjunto de Entrenamiento: La media de los residuales se encuentra cercana al error/score correspondiente al val_loss de la época del mejor modelo.
Conjunto de Validación: La media de los residuales se encuentra cercana al error/score correspondiente al val_loss de la época del mejor modelo.
Conjunto de Prueba: La media de los residuales se encuentra cercana al error/score correspondiente al val_loss de la época del mejor modelo. De igual forma se observa una alta variabilidad en los residuales.
Lo anterior es coherente con las métricas obtenidas para el modelo implementado.
Horizonte de 28 días (\(\tau=28\))#
A continuación, se lleva a cabo la búsqueda de los hiperparámetros que optimizan nuestro modelo utilizando Keras y GridSearch.
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'  # silence TensorFlow info/warning logs

def create_mlp_model(activation='tanh', learning_rate=0.001):
    """Build and compile the grid-search MLP for a 28-feature input window.

    Args:
        activation: activation function applied to the three hidden Dense layers.
        learning_rate: step size for the (legacy) Adam optimizer.

    Returns:
        A compiled Keras Model with MAE loss and a single linear output unit.
    """
    inputs = Input(shape=(28,), dtype='float32')  # input layer: 28 lagged features
    hidden = inputs
    # Funnel of fully connected layers: 32 -> 16 -> 16 units.
    for units in (32, 16, 16):
        hidden = Dense(units, activation=activation)(hidden)
    hidden = Dropout(0.2)(hidden)  # dropout for regularization
    outputs = Dense(1, activation='linear')(hidden)  # regression head
    mlp = Model(inputs=inputs, outputs=outputs)
    mlp.compile(loss='mean_absolute_error',
                optimizer=tf.keras.optimizers.legacy.Adam(learning_rate=learning_rate))
    return mlp
# Grid-search setup: wrap the Keras builder in a scikit-learn-compatible regressor.
model = KerasRegressor(build_fn=create_mlp_model, epochs=20, batch_size=16, verbose=0)
# Hyperparameter grid; the wider sweeps tried earlier are noted in the comments.
param_grid = {
    'activation': ['relu'],       # full sweep: ['relu', 'tanh', 'sigmoid']
    'epochs': [20],               # full sweep: [20, 50, 100, 200, 300]
    'learning_rate': [0.001],     # full sweep: [0.001, 0.01, 0.1, 0.2, 0.3]
}
grid = GridSearchCV(estimator=model, param_grid=param_grid, n_jobs=-1,
                    cv=KFold(n_splits=5, shuffle=True), verbose=2)
# Fit the grid search on the 28-day training window.
grid_result = grid.fit(X_train_at28, y_train_at28)
# Report the winning hyperparameters and the cross-validation score.
best = grid_result.best_params_
print(f"Mejor función de activación: {best['activation']}")
print(f"Mejor número de epocas: {best['epochs']}")
print(f"Mejor Longitud de paso μ (Learning Rate): {best['learning_rate']}")
print(f"Mejor Puntuación: {grid_result.best_score_}")
Fitting 5 folds for each of 1 candidates, totalling 5 fits
[CV] END ....activation=relu, epochs=20, learning_rate=0.001; total time= 4.3s
[CV] END ....activation=relu, epochs=20, learning_rate=0.001; total time= 4.2s
[CV] END ....activation=relu, epochs=20, learning_rate=0.001; total time= 4.4s
[CV] END ....activation=relu, epochs=20, learning_rate=0.001; total time= 4.2s
[CV] END ....activation=relu, epochs=20, learning_rate=0.001; total time= 4.6s
Mejor función de activación: relu
Mejor número de epocas: 20
Mejor Longitud de paso μ (Learning Rate): 0.001
Mejor Puntuación: -0.811628782749176
A continuación, configuramos la red MLP empleando la API funcional de Keras. Con este método, una capa puede ser designada como la entrada para la siguiente capa en el momento de su definición.
En este contexto, Input es una función utilizada para establecer una capa de entrada en un modelo de red neuronal. La propiedad shape=(28,) define la estructura de los datos de entrada, lo que indica que estos tendrán 28 dimensiones. Por otro lado, dtype='float32' especifica que los elementos de esta capa serán números de punto flotante de 32 bits.
A continuación se define la función build_models_mlp para la generación de modelos de acuerdo a los parámetros indexados a través de la lista.
Indexación de parámetros de la función build_models_mlp con base en lo indicado en el numeral 3 del parcial práctico.
# Parameters for build_models_mlp: 28-step input window, four hidden sizes and
# four dropout rates (16 model variants), all with ReLU activation.
input_shape28 = 28
neurons_list = [10, 100, 1000, 10000]
dropout_rates = [0.2, 0.4, 0.6, 0.8]
models_MLP28_at = build_models_mlp(input_shape28, neurons_list, dropout_rates, 'relu')
Modelo con 10 neuronas y dropout 0.2:
Model: "model_188"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_189 (InputLayer) [(None, 28)] 0
dense_548 (Dense) (None, 32) 928
dense_549 (Dense) (None, 16) 528
dense_550 (Dense) (None, 16) 272
dropout_188 (Dropout) (None, 16) 0
dense_551 (Dense) (None, 1) 17
=================================================================
Total params: 1,745
Trainable params: 1,745
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10 neuronas y dropout 0.4:
Model: "model_189"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_190 (InputLayer) [(None, 28)] 0
dense_552 (Dense) (None, 32) 928
dense_553 (Dense) (None, 16) 528
dense_554 (Dense) (None, 16) 272
dropout_189 (Dropout) (None, 16) 0
dense_555 (Dense) (None, 1) 17
=================================================================
Total params: 1,745
Trainable params: 1,745
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10 neuronas y dropout 0.6:
Model: "model_190"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_191 (InputLayer) [(None, 28)] 0
dense_556 (Dense) (None, 32) 928
dense_557 (Dense) (None, 16) 528
dense_558 (Dense) (None, 16) 272
dropout_190 (Dropout) (None, 16) 0
dense_559 (Dense) (None, 1) 17
=================================================================
Total params: 1,745
Trainable params: 1,745
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10 neuronas y dropout 0.8:
Model: "model_191"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_192 (InputLayer) [(None, 28)] 0
dense_560 (Dense) (None, 32) 928
dense_561 (Dense) (None, 16) 528
dense_562 (Dense) (None, 16) 272
dropout_191 (Dropout) (None, 16) 0
dense_563 (Dense) (None, 1) 17
=================================================================
Total params: 1,745
Trainable params: 1,745
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 100 neuronas y dropout 0.2:
Model: "model_192"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_193 (InputLayer) [(None, 28)] 0
dense_564 (Dense) (None, 32) 928
dense_565 (Dense) (None, 16) 528
dense_566 (Dense) (None, 16) 272
dropout_192 (Dropout) (None, 16) 0
dense_567 (Dense) (None, 1) 17
=================================================================
Total params: 1,745
Trainable params: 1,745
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 100 neuronas y dropout 0.4:
Model: "model_193"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_194 (InputLayer) [(None, 28)] 0
dense_568 (Dense) (None, 32) 928
dense_569 (Dense) (None, 16) 528
dense_570 (Dense) (None, 16) 272
dropout_193 (Dropout) (None, 16) 0
dense_571 (Dense) (None, 1) 17
=================================================================
Total params: 1,745
Trainable params: 1,745
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 100 neuronas y dropout 0.6:
Model: "model_194"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_195 (InputLayer) [(None, 28)] 0
dense_572 (Dense) (None, 32) 928
dense_573 (Dense) (None, 16) 528
dense_574 (Dense) (None, 16) 272
dropout_194 (Dropout) (None, 16) 0
dense_575 (Dense) (None, 1) 17
=================================================================
Total params: 1,745
Trainable params: 1,745
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 100 neuronas y dropout 0.8:
Model: "model_195"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_196 (InputLayer) [(None, 28)] 0
dense_576 (Dense) (None, 32) 928
dense_577 (Dense) (None, 16) 528
dense_578 (Dense) (None, 16) 272
dropout_195 (Dropout) (None, 16) 0
dense_579 (Dense) (None, 1) 17
=================================================================
Total params: 1,745
Trainable params: 1,745
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 1000 neuronas y dropout 0.2:
Model: "model_196"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_197 (InputLayer) [(None, 28)] 0
dense_580 (Dense) (None, 32) 928
dense_581 (Dense) (None, 16) 528
dense_582 (Dense) (None, 16) 272
dropout_196 (Dropout) (None, 16) 0
dense_583 (Dense) (None, 1) 17
=================================================================
Total params: 1,745
Trainable params: 1,745
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 1000 neuronas y dropout 0.4:
Model: "model_197"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_198 (InputLayer) [(None, 28)] 0
dense_584 (Dense) (None, 32) 928
dense_585 (Dense) (None, 16) 528
dense_586 (Dense) (None, 16) 272
dropout_197 (Dropout) (None, 16) 0
dense_587 (Dense) (None, 1) 17
=================================================================
Total params: 1,745
Trainable params: 1,745
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 1000 neuronas y dropout 0.6:
Model: "model_198"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_199 (InputLayer) [(None, 28)] 0
dense_588 (Dense) (None, 32) 928
dense_589 (Dense) (None, 16) 528
dense_590 (Dense) (None, 16) 272
dropout_198 (Dropout) (None, 16) 0
dense_591 (Dense) (None, 1) 17
=================================================================
Total params: 1,745
Trainable params: 1,745
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 1000 neuronas y dropout 0.8:
Model: "model_199"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_200 (InputLayer) [(None, 28)] 0
dense_592 (Dense) (None, 32) 928
dense_593 (Dense) (None, 16) 528
dense_594 (Dense) (None, 16) 272
dropout_199 (Dropout) (None, 16) 0
dense_595 (Dense) (None, 1) 17
=================================================================
Total params: 1,745
Trainable params: 1,745
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10000 neuronas y dropout 0.2:
Model: "model_200"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_201 (InputLayer) [(None, 28)] 0
dense_596 (Dense) (None, 32) 928
dense_597 (Dense) (None, 16) 528
dense_598 (Dense) (None, 16) 272
dropout_200 (Dropout) (None, 16) 0
dense_599 (Dense) (None, 1) 17
=================================================================
Total params: 1,745
Trainable params: 1,745
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10000 neuronas y dropout 0.4:
Model: "model_201"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_202 (InputLayer) [(None, 28)] 0
dense_600 (Dense) (None, 32) 928
dense_601 (Dense) (None, 16) 528
dense_602 (Dense) (None, 16) 272
dropout_201 (Dropout) (None, 16) 0
dense_603 (Dense) (None, 1) 17
=================================================================
Total params: 1,745
Trainable params: 1,745
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10000 neuronas y dropout 0.6:
Model: "model_202"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_203 (InputLayer) [(None, 28)] 0
dense_604 (Dense) (None, 32) 928
dense_605 (Dense) (None, 16) 528
dense_606 (Dense) (None, 16) 272
dropout_202 (Dropout) (None, 16) 0
dense_607 (Dense) (None, 1) 17
=================================================================
Total params: 1,745
Trainable params: 1,745
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10000 neuronas y dropout 0.8:
Model: "model_203"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_204 (InputLayer) [(None, 28)] 0
dense_608 (Dense) (None, 32) 928
dense_609 (Dense) (None, 16) 528
dense_610 (Dense) (None, 16) 272
dropout_203 (Dropout) (None, 16) 0
dense_611 (Dense) (None, 1) 17
=================================================================
Total params: 1,745
Trainable params: 1,745
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Para entrenar el modelo, se utiliza la función fit() en el objeto del modelo, proporcionándole como argumentos X_train y y_train. El proceso de entrenamiento se lleva a cabo a lo largo de un número específico de épocas. Además, el parámetro batch_size especifica cuántas muestras del conjunto de entrenamiento se usarán en cada iteración de retropropagación (backpropagation).
El conjunto de datos de validación se utiliza para evaluar el rendimiento del modelo al finalizar cada época. Se emplea un objeto ModelCheckpoint que monitorea la función de pérdida en el conjunto de validación y almacena el modelo correspondiente a la época en la que esta función alcanza su valor más bajo.
Se define val_loss como el valor de la función de coste para los datos de validación cruzada y loss como el valor de la función de coste para los datos de entrenamiento. period=1 indica que el modelo se evaluará y potencialmente se guardará después de cada época.
from tensorflow.keras.callbacks import ModelCheckpoint

# Checkpoint callback: after every epoch, save the full model (not just the
# weights) whenever validation loss reaches a new minimum. The epoch number
# and val_loss are embedded in the saved filename.
save_weights = os.path.join('keras_models', 'PRSA_data_at_MLP28_weights.{epoch:02d}-{val_loss:.4f}.keras')
save_best28_at = ModelCheckpoint(save_weights, monitor='val_loss', verbose=2,
save_best_only=True, save_weights_only=False, mode='min', save_freq='epoch');
A continuación se itera sobre cada uno de los modelos del objeto models_MLP28.
import os
from joblib import dump, load

# Train each MLP variant once and persist its training history; on re-runs,
# reload the saved history instead of retraining.
history_at_MPL28 = []
for i, model in enumerate(models_MLP28_at):
    filename = f'history_ag_MPL28_model_{i}.joblib'
    if os.path.exists(filename):
        # A saved history exists for this model: load it and skip training.
        model_history = load(filename)
        # FIX: the message previously printed the literal '(unknown)' instead
        # of interpolating the actual filename.
        print(f"El archivo '{filename}' ya existe. Se ha cargado el historial del entrenamiento.")
    else:
        model_history = model.fit(x=X_train_at28, y=y_train_at28, batch_size=16, epochs=20,  # epochs chosen via the grid search above
                                  verbose=2, callbacks=[save_best28_at], validation_data=(X_val_at28, y_val_at28),
                                  shuffle=True)
        dump(model_history.history, filename)
        print(f"El entrenamiento del modelo {i + 1} se ha completado y el historial ha sido guardado en '{filename}'.")
    # Store a plain dict either way (loaded histories are already dicts;
    # freshly trained ones expose .history).
    history_at_MPL28.append(model_history if isinstance(model_history, dict) else model_history.history)
El archivo 'history_ag_MPL28_model_0.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_ag_MPL28_model_1.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_ag_MPL28_model_2.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_ag_MPL28_model_3.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_ag_MPL28_model_4.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_ag_MPL28_model_5.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_ag_MPL28_model_6.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_ag_MPL28_model_7.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_ag_MPL28_model_8.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_ag_MPL28_model_9.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_ag_MPL28_model_10.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_ag_MPL28_model_11.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_ag_MPL28_model_12.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_ag_MPL28_model_13.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_ag_MPL28_model_14.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_ag_MPL28_model_15.joblib' ya existe. Se ha cargado el historial del entrenamiento.
En este caso, el parámetro verbose=2 indica que se mostrará una línea por cada época durante el entrenamiento. Los valores posibles son: 0 para no mostrar información, 1 para visualizar una barra de progreso y 2 para ver un registro por época.
Además, las muestras se mezclan de manera aleatoria antes de cada época, gracias a la opción shuffle=True.
Una vez completado el entrenamiento, se realizan predicciones sobre la concentración de Precio del Bitcoin utilizando los mejores modelos guardados. Asimismo, se evalúa la calidad del ajuste mediante el uso de métricas de medición como SSE, MAPE, MAD, MSD y R2.
import os
import re
from tensorflow.keras.models import load_model
import numpy as np

# Scan the checkpoint directory and load the model whose filename records the
# lowest validation loss.
model_dir = 'keras_models'
files = os.listdir(model_dir)
pattern = r"PRSA_data_at_MLP28_weights\.(\d+)-([\d\.]+)\.keras"

best_val_loss = float('inf')
best_model_file = None
best_model28_at = None

for file in files:
    match = re.match(pattern, file)
    if not match:
        continue
    epoch = int(match.group(1))        # epoch number encoded in the filename
    val_loss = float(match.group(2))   # val_loss encoded in the filename
    # Keep the checkpoint with the smallest recorded val_loss seen so far.
    if val_loss < best_val_loss:
        best_val_loss, best_model_file = val_loss, file

if best_model_file:
    best_model_path = os.path.join(model_dir, best_model_file)
    print(f"Cargando el mejor modelo: {best_model_file} con val_loss: {best_val_loss}")
    best_model28_at = load_model(best_model_path)
    if best_model28_at is None:
        print("Error: No se pudo cargar el mejor modelo.")
else:
    print("No se encontraron archivos de modelos que coincidan con el patrón.")
Cargando el mejor modelo: PRSA_data_at_MLP28_weights.12-0.0354.keras con val_loss: 0.0354
A continuación estimamos las predicciones del modelo MLP para la época en donde la función de pérdida alcanza su valor más bajo.
# Generate predictions from the best checkpoint for the three data splits.
if best_model28_at is None:
    print("No se puede realizar predicciones porque el mejor modelo no se ha cargado.")
else:
    splits = (X_train_at28, X_val_at28, X_test_at28)
    raw_preds = [best_model28_at.predict(x) for x in splits]
    # Drop the trailing singleton dimension from each prediction array.
    train_preds_at_MLP28, val_preds_at_MLP28, test_preds_at_MLP28 = (
        np.squeeze(p) for p in raw_preds
    )
    # Show the predictions for each split.
    print("Predicciones de entrenamiento:", train_preds_at_MLP28)
    print("Predicciones de validación:", val_preds_at_MLP28)
    print("Predicciones de prueba:", test_preds_at_MLP28)
153/153 [==============================] - 0s 206us/step
1/1 [==============================] - 0s 8ms/step
1/1 [==============================] - 0s 9ms/step
Predicciones de entrenamiento: [ 0.2793349 0.2793349 0.2793349 ... 23.200956 23.194052 23.192263 ]
Predicciones de validación: [23.19198 23.191072 23.191805 23.204468 23.209131 23.215193 23.20632
23.209276 23.228174 23.232864 23.230886 23.237192 23.230932 23.222807
23.216152 23.221617 23.22116 23.220476 23.21031 23.202564 23.208431
23.199755 23.216951 23.220448 23.217342 23.208328 23.21631 23.215166]
Predicciones de prueba: [23.21665 23.224905 23.214182 23.224089 23.215078 23.205296 23.215624
23.217966 23.221514 23.217936 23.201454 23.193897 23.196539 23.204798
23.214743 23.221205 23.236559 23.250154 23.264048 23.282892 23.285479
23.281212 23.288769 23.280432 23.289543 23.293667 23.299582 23.302547]
A continuación se indexan los parámetros sobre la función data_plot() para un horizonte de 28 días (\(\tau = 28\)).
data_train_plot_at28, data_val_plot_at28 , data_test_plot_at28 = data_plot(df_1_st['A_t'], 28)
Datos de entrenamiento:
4998 0.0000
4997 0.0000
4996 0.0000
4995 0.0000
4994 0.0000
...
60 23.1620
59 23.1582
58 23.2052
57 23.2126
56 23.2105
Name: A_t, Length: 4943, dtype: float6428
Datos de validación:
55 23.2407
54 23.2325
53 23.2240
52 23.2357
51 23.2384
50 23.2340
49 23.2241
48 23.2268
47 23.2360
46 23.2651
45 23.2866
44 23.3271
43 23.3404
42 23.3513
41 23.3858
40 23.3813
39 23.4228
38 23.4251
37 23.4296
36 23.4202
35 23.4294
34 23.4230
33 23.4322
32 23.4245
31 23.4141
30 23.4028
29 23.4192
28 23.4221
Name: A_t, dtype: float6428
Datos de prueba:
27 23.4757
26 23.5227
25 23.6176
24 23.5968
23 23.6169
22 23.6104
21 23.6288
20 23.7101
19 23.6445
18 23.6804
17 23.6921
16 23.7118
15 23.7147
14 23.7234
13 23.7689
12 23.7602
11 23.7825
10 23.7595
9 23.7326
8 23.6728
7 23.7199
6 23.7083
5 23.6263
4 23.7198
3 23.6852
2 23.6589
1 23.6629
0 23.7125
Name: A_t, dtype: float6428
plot_model(data_train_plot_at28[-100:], data_val_plot_at28, data_test_plot_at28, val_preds_at_MLP28, test_preds_at_MLP28, "Predicciones usando Perceptrón Multicapa (MLP) para un horizonte de 28 días")
A continuación realizamos un análisis sobre los residuales y rendimiento de nuestro modelo con relación a nuestro conjunto de validación(rendimiento) y test (rendimiento y residuales).
Conjunto de datos de Entrenamiento
A continuación se realiza el análisis de los residuales para el conjunto de entrenamiento.
ljung_box_pval_MLP_train28_at, jarque_bera_pval_MLP_train28_at = diagnostic_plots(y_train_at28, train_preds_at_MLP28)
Ljung-Box LB Statistic: 4311.502935
Ljung-Box p-value: 0.000000
Se rechaza H0: hay autocorrelación en los residuales.
Jarque-Bera p-value: 0.000000
Se rechaza H0: los residuales no siguen una distribución normal.
# Compute fit metrics for the training split and attach the diagnostic p-values.
metrica_at_MLP_train28 = metricas(y_train_at28,train_preds_at_MLP28)
metrica_at_MLP_train28.index = metrica_at_MLP_train28.index.map({0: 'MLP Entrenamiento Retorno Acumulado τ = 28'})
metrica_at_MLP_train28['Ljung-Box p-value'] = pd.Series([ljung_box_pval_MLP_train28_at], index=metrica_at_MLP_train28.index)
metrica_at_MLP_train28['Jarque-Bera p-value'] = pd.Series([jarque_bera_pval_MLP_train28_at], index=metrica_at_MLP_train28.index)
metrica_at_MLP_train28
| SSE | MAPE | MAD | MSD | R2 | Ljung-Box p-value | Jarque-Bera p-value | |
|---|---|---|---|---|---|---|---|
| MLP Entrenamiento Retorno Acumulado τ = 28 | 289.4953 | 1.57% | 0.16 | 0.06 | 99.84% | 0.0 | 0.0 |
Conjunto de datos de Prueba:
ljung_box_pvalMLP28_at, jarque_bera_pvalMLP28_at = evaluate_residuals(data_test_plot_at28, test_preds_at_MLP28)
Se rechaza H0: hay autocorrelación en los residuales.
No se rechaza H0: los residuales siguen una distribución normal.
# Compute fit metrics for the test split and attach the diagnostic p-values.
metrica_MLP28_test_at = metricas(y_test_at28,test_preds_at_MLP28)
metrica_MLP28_test_at.index = metrica_MLP28_test_at.index.map({0: 'MLP Prueba Retorno Acumulado τ = 28'})
metrica_MLP28_test_at['Ljung-Box p-value'] = pd.Series([ljung_box_pvalMLP28_at], index=metrica_MLP28_test_at.index)
metrica_MLP28_test_at['Jarque-Bera p-value'] = pd.Series([jarque_bera_pvalMLP28_at], index=metrica_MLP28_test_at.index)
metrica_MLP28_test_at
| SSE | MAPE | MAD | MSD | R2 | Ljung-Box p-value | Jarque-Bera p-value | |
|---|---|---|---|---|---|---|---|
| MLP Prueba Retorno Acumulado τ = 28 | 0.3766 | 0.42% | 0.1 | 0.01 | -89.75% | 0.0001 | 0.7522 |
Curva Runs vs Error/Score :
plot_best_model_validation_loss(history_at_MPL28)
Boxplot Errores Conjunto de Entrenamiento, Validación y Prueba :
errores_plots(y_train_at28, train_preds_at_MLP28, y_val_at28, val_preds_at_MLP28, y_test_at28, test_preds_at_MLP28)
De acuerdo a los resultados obtenidos del gráfico de Run vs Error/Score y al compararlo con el gráfico boxplot de los residuales se observa lo siguiente:
Conjunto de Entrenamiento: La media de los residuales se encuentra muy cercana al error/score correspondiente al val_loss de la época del mejor modelo.
Conjunto de Validación: La media de los residuales se encuentra alejada del error/score correspondiente al val_loss de la época del mejor modelo.
Conjunto de Prueba: La media de los residuales se encuentra alejada del error/score correspondiente al val_loss de la época del mejor modelo. De igual forma se observa una alta variabilidad en los residuales.
Lo anterior es coherente con las métricas obtenidas para el modelo implementado.
Retorno Acumulado: Memoria a Corto y Largo Plazo (LSTM) #
Ya definimos los regresores (X) y la variable objetivo (y) para el proceso de entrenamiento y validación en la sección correspondiente al modelo Perceptrones Multicapa a través de la función create_time_series_datasets(), sin embargo, ésta se utiliza para generar arreglos 2D de forma (número de muestras, número de pasos de tiempo). Dado que la entrada a las capas de una RNN debe ser de forma: número de muestras, número de pasos de tiempo, número de características por paso de tiempo; procedemos con la definición de la función change_dimension_lstm() para realizar la transformación de 2D a 3D.
def change_dimension_lstm(X_train, X_val, X_test):
    """Add a trailing feature axis so 2D windows become LSTM-ready 3D arrays.

    Each array of shape (samples, timesteps) is reshaped to
    (samples, timesteps, 1), i.e. one feature per timestep, as required by
    Keras recurrent layers. Prints the resulting shapes and returns the
    three reshaped arrays in the same order.
    """
    reshaped = [a.reshape((a.shape[0], a.shape[1], 1)) for a in (X_train, X_val, X_test)]
    X_train_lstm, X_val_lstm, X_test_lstm = reshaped
    print('Shape of 3D arrays X:', X_train_lstm.shape, X_val_lstm.shape, X_test_lstm.shape)
    return X_train_lstm, X_val_lstm, X_test_lstm
Horizonte de 7 días (\(\tau=7\))#
Indexación de parámetros para cambio de dimensión de X de 2D a 3D.
X_train_at_lstm_7, X_val_at_lstm_7, X_test_at_lstm_7 = change_dimension_lstm(X_train_at7, X_val_at7, X_test_at7)
Shape of 3D arrays X: (4971, 7, 1) (7, 7, 1) (7, 7, 1)
A continuación, se lleva a cabo la búsqueda de los hiperparámetros que optimizan nuestro modelo utilizando Keras y GridSearch.
from sklearn.metrics import make_scorer, mean_absolute_error
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # Esto ignorará todos los logs de info y warnings.
# Define the LSTM network architecture
def create_lstm_model(optimizer, activation):
    """Build and compile a stacked-LSTM regressor for 7-step univariate windows.

    Parameters
    ----------
    optimizer : str or keras optimizer
        Optimizer passed straight to ``compile`` (e.g. 'SGD', 'Adam').
    activation : str
        Activation for both LSTM layers. BUG FIX: this argument was previously
        accepted but never used, so the GridSearch over 'activation' had no
        effect; it is now applied to the LSTM layers (the searched value
        'tanh' matches the Keras LSTM default, so prior results are unchanged).

    Returns
    -------
    A compiled Keras ``Model`` mapping (7, 1) inputs to a single linear output,
    trained with mean-absolute-error loss.
    """
    input_layer_lstm = Input(shape=(7, 1), dtype='float32')
    # First LSTM returns the full sequence so it can feed the second LSTM.
    lstm_layer1 = LSTM(64, activation=activation, return_sequences=True)(input_layer_lstm)
    # Second LSTM returns only the last timestep's output.
    lstm_layer2 = LSTM(32, activation=activation, return_sequences=False)(lstm_layer1)
    # Dropout regularization before the regression head.
    dropout_layer_lstm = Dropout(0.2)(lstm_layer2)
    output_layer_lstm = Dense(1, activation='linear')(dropout_layer_lstm)
    ts_model_lstm = Model(inputs=input_layer_lstm, outputs=output_layer_lstm)
    ts_model_lstm.compile(loss='mean_absolute_error', optimizer=optimizer)
    return ts_model_lstm
# Parameter grid definition (reduced search space for runtime;
# the commented values show the full space originally considered).
param_grid = {'activation': ['tanh'], # activation functions to try: ['relu', 'tanh', 'sigmoid']
              'epochs' : [20], #[20, 50, 100, 150]
              'optimizer': ['SGD'] #['SGD', 'RMSprop', 'Adam']
              }
# Grid Search configuration: MAE scorer over a Keras model wrapped for sklearn.
scoring = make_scorer(mean_absolute_error)
model = KerasRegressor(build_fn=create_lstm_model, epochs=10, batch_size=16, verbose=0)
# 5-fold shuffled cross-validation over the grid, parallelized across all cores.
grid = GridSearchCV(estimator=model, param_grid=param_grid,cv=KFold(n_splits=5, shuffle=True), scoring=scoring, n_jobs = -1,verbose =2)
grid_result = grid.fit(X_train_at_lstm_7, y_train_at7)
# Report the best hyperparameters and score found by the Grid Search.
print(f"Mejor función de activación: {grid_result.best_params_['activation']}")
print(f"Mejor número de epocas: {grid_result.best_params_['epochs']}")
print(f"Mejor Longitud de paso μ (optimizer): {grid_result.best_params_['optimizer']}")
print(f"Mejor Puntuación: {grid_result.best_score_}")
Fitting 5 folds for each of 1 candidates, totalling 5 fits
[CV] END ..........activation=tanh, epochs=20, optimizer=SGD; total time= 22.7s
[CV] END ..........activation=tanh, epochs=20, optimizer=SGD; total time= 22.6s
[CV] END ..........activation=tanh, epochs=20, optimizer=SGD; total time= 22.7s
[CV] END ..........activation=tanh, epochs=20, optimizer=SGD; total time= 22.9s
[CV] END ..........activation=tanh, epochs=20, optimizer=SGD; total time= 23.0s
Mejor función de activación: tanh
Mejor número de epocas: 20
Mejor Longitud de paso μ (optimizer): SGD
Mejor Puntuación: 0.4683504606453311
El modelo de predicción de series temporales se basa en una red neuronal recurrente que incluye una capa de entrada que se conecta a una capa LSTM. Esta capa LSTM considera 7 pasos temporales, equivalentes al número de datos históricos. La salida de la LSTM se genera únicamente a partir del último paso temporal. Cada uno de los n pasos temporales definidos en el shape (n,) cuenta con 32 neuronas ocultas.
La cantidad de pasos temporales (timesteps) y 1 indica el número de características (features) por cada paso temporal. Se utiliza return_sequences=True cuando es necesario enviar la secuencia completa de salidas a la capa siguiente, que puede ser otra capa recurrente, para este caso otra capa LSTM.
La salida de LSTM pasa a una capa de exclusión que elimina aleatoriamente el 20%, 40%, 60% y 80% de la entrada antes de pasar a la capa de salida, que tiene una única neurona oculta con una función de activación lineal
Indexación de parámetros de la función build_models_lstm con base en lo indicado en el numeral 3 del parcial práctico.
# Parameters for build_models_lstm (defined earlier in the notebook):
# 7-step input window, candidate neuron counts, and dropout rates to combine.
input_shape7 = 7
neurons_list = [10, 100, 1000, 10000]
dropout_rates = [0.2, 0.4, 0.6, 0.8]
# Build one LSTM model per (neurons, dropout) combination, all using SGD.
models_LSTM7_at = build_models_lstm(input_shape7, neurons_list, dropout_rates ,'SGD')
Model: "model_205"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_206 (InputLayer) [(None, 7, 1)] 0
lstm_138 (LSTM) (None, 7, 64) 16896
lstm_139 (LSTM) (None, 32) 12416
dropout_205 (Dropout) (None, 32) 0
dense_613 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10 neuronas y dropout 0.2:
Model: "model_205"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_206 (InputLayer) [(None, 7, 1)] 0
lstm_138 (LSTM) (None, 7, 64) 16896
lstm_139 (LSTM) (None, 32) 12416
dropout_205 (Dropout) (None, 32) 0
dense_613 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_206"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_207 (InputLayer) [(None, 7, 1)] 0
lstm_140 (LSTM) (None, 7, 64) 16896
lstm_141 (LSTM) (None, 32) 12416
dropout_206 (Dropout) (None, 32) 0
dense_614 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10 neuronas y dropout 0.4:
Model: "model_206"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_207 (InputLayer) [(None, 7, 1)] 0
lstm_140 (LSTM) (None, 7, 64) 16896
lstm_141 (LSTM) (None, 32) 12416
dropout_206 (Dropout) (None, 32) 0
dense_614 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_207"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_208 (InputLayer) [(None, 7, 1)] 0
lstm_142 (LSTM) (None, 7, 64) 16896
lstm_143 (LSTM) (None, 32) 12416
dropout_207 (Dropout) (None, 32) 0
dense_615 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10 neuronas y dropout 0.6:
Model: "model_207"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_208 (InputLayer) [(None, 7, 1)] 0
lstm_142 (LSTM) (None, 7, 64) 16896
lstm_143 (LSTM) (None, 32) 12416
dropout_207 (Dropout) (None, 32) 0
dense_615 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_208"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_209 (InputLayer) [(None, 7, 1)] 0
lstm_144 (LSTM) (None, 7, 64) 16896
lstm_145 (LSTM) (None, 32) 12416
dropout_208 (Dropout) (None, 32) 0
dense_616 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10 neuronas y dropout 0.8:
Model: "model_208"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_209 (InputLayer) [(None, 7, 1)] 0
lstm_144 (LSTM) (None, 7, 64) 16896
lstm_145 (LSTM) (None, 32) 12416
dropout_208 (Dropout) (None, 32) 0
dense_616 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_209"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_210 (InputLayer) [(None, 7, 1)] 0
lstm_146 (LSTM) (None, 7, 64) 16896
lstm_147 (LSTM) (None, 32) 12416
dropout_209 (Dropout) (None, 32) 0
dense_617 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 100 neuronas y dropout 0.2:
Model: "model_209"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_210 (InputLayer) [(None, 7, 1)] 0
lstm_146 (LSTM) (None, 7, 64) 16896
lstm_147 (LSTM) (None, 32) 12416
dropout_209 (Dropout) (None, 32) 0
dense_617 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_210"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_211 (InputLayer) [(None, 7, 1)] 0
lstm_148 (LSTM) (None, 7, 64) 16896
lstm_149 (LSTM) (None, 32) 12416
dropout_210 (Dropout) (None, 32) 0
dense_618 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 100 neuronas y dropout 0.4:
Model: "model_210"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_211 (InputLayer) [(None, 7, 1)] 0
lstm_148 (LSTM) (None, 7, 64) 16896
lstm_149 (LSTM) (None, 32) 12416
dropout_210 (Dropout) (None, 32) 0
dense_618 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_211"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_212 (InputLayer) [(None, 7, 1)] 0
lstm_150 (LSTM) (None, 7, 64) 16896
lstm_151 (LSTM) (None, 32) 12416
dropout_211 (Dropout) (None, 32) 0
dense_619 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 100 neuronas y dropout 0.6:
Model: "model_211"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_212 (InputLayer) [(None, 7, 1)] 0
lstm_150 (LSTM) (None, 7, 64) 16896
lstm_151 (LSTM) (None, 32) 12416
dropout_211 (Dropout) (None, 32) 0
dense_619 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_212"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_213 (InputLayer) [(None, 7, 1)] 0
lstm_152 (LSTM) (None, 7, 64) 16896
lstm_153 (LSTM) (None, 32) 12416
dropout_212 (Dropout) (None, 32) 0
dense_620 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 100 neuronas y dropout 0.8:
Model: "model_212"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_213 (InputLayer) [(None, 7, 1)] 0
lstm_152 (LSTM) (None, 7, 64) 16896
lstm_153 (LSTM) (None, 32) 12416
dropout_212 (Dropout) (None, 32) 0
dense_620 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_213"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_214 (InputLayer) [(None, 7, 1)] 0
lstm_154 (LSTM) (None, 7, 64) 16896
lstm_155 (LSTM) (None, 32) 12416
dropout_213 (Dropout) (None, 32) 0
dense_621 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 1000 neuronas y dropout 0.2:
Model: "model_213"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_214 (InputLayer) [(None, 7, 1)] 0
lstm_154 (LSTM) (None, 7, 64) 16896
lstm_155 (LSTM) (None, 32) 12416
dropout_213 (Dropout) (None, 32) 0
dense_621 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_214"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_215 (InputLayer) [(None, 7, 1)] 0
lstm_156 (LSTM) (None, 7, 64) 16896
lstm_157 (LSTM) (None, 32) 12416
dropout_214 (Dropout) (None, 32) 0
dense_622 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 1000 neuronas y dropout 0.4:
Model: "model_214"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_215 (InputLayer) [(None, 7, 1)] 0
lstm_156 (LSTM) (None, 7, 64) 16896
lstm_157 (LSTM) (None, 32) 12416
dropout_214 (Dropout) (None, 32) 0
dense_622 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_215"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_216 (InputLayer) [(None, 7, 1)] 0
lstm_158 (LSTM) (None, 7, 64) 16896
lstm_159 (LSTM) (None, 32) 12416
dropout_215 (Dropout) (None, 32) 0
dense_623 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 1000 neuronas y dropout 0.6:
Model: "model_215"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_216 (InputLayer) [(None, 7, 1)] 0
lstm_158 (LSTM) (None, 7, 64) 16896
lstm_159 (LSTM) (None, 32) 12416
dropout_215 (Dropout) (None, 32) 0
dense_623 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_216"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_217 (InputLayer) [(None, 7, 1)] 0
lstm_160 (LSTM) (None, 7, 64) 16896
lstm_161 (LSTM) (None, 32) 12416
dropout_216 (Dropout) (None, 32) 0
dense_624 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 1000 neuronas y dropout 0.8:
Model: "model_216"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_217 (InputLayer) [(None, 7, 1)] 0
lstm_160 (LSTM) (None, 7, 64) 16896
lstm_161 (LSTM) (None, 32) 12416
dropout_216 (Dropout) (None, 32) 0
dense_624 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_217"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_218 (InputLayer) [(None, 7, 1)] 0
lstm_162 (LSTM) (None, 7, 64) 16896
lstm_163 (LSTM) (None, 32) 12416
dropout_217 (Dropout) (None, 32) 0
dense_625 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10000 neuronas y dropout 0.2:
Model: "model_217"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_218 (InputLayer) [(None, 7, 1)] 0
lstm_162 (LSTM) (None, 7, 64) 16896
lstm_163 (LSTM) (None, 32) 12416
dropout_217 (Dropout) (None, 32) 0
dense_625 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_218"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_219 (InputLayer) [(None, 7, 1)] 0
lstm_164 (LSTM) (None, 7, 64) 16896
lstm_165 (LSTM) (None, 32) 12416
dropout_218 (Dropout) (None, 32) 0
dense_626 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10000 neuronas y dropout 0.4:
Model: "model_218"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_219 (InputLayer) [(None, 7, 1)] 0
lstm_164 (LSTM) (None, 7, 64) 16896
lstm_165 (LSTM) (None, 32) 12416
dropout_218 (Dropout) (None, 32) 0
dense_626 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_219"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_220 (InputLayer) [(None, 7, 1)] 0
lstm_166 (LSTM) (None, 7, 64) 16896
lstm_167 (LSTM) (None, 32) 12416
dropout_219 (Dropout) (None, 32) 0
dense_627 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10000 neuronas y dropout 0.6:
Model: "model_219"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_220 (InputLayer) [(None, 7, 1)] 0
lstm_166 (LSTM) (None, 7, 64) 16896
lstm_167 (LSTM) (None, 32) 12416
dropout_219 (Dropout) (None, 32) 0
dense_627 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_220"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_221 (InputLayer) [(None, 7, 1)] 0
lstm_168 (LSTM) (None, 7, 64) 16896
lstm_169 (LSTM) (None, 32) 12416
dropout_220 (Dropout) (None, 32) 0
dense_628 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10000 neuronas y dropout 0.8:
Model: "model_220"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_221 (InputLayer) [(None, 7, 1)] 0
lstm_168 (LSTM) (None, 7, 64) 16896
lstm_169 (LSTM) (None, 32) 12416
dropout_220 (Dropout) (None, 32) 0
dense_628 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Para entrenar el modelo, se utiliza la función fit() en el objeto del modelo, proporcionándole como argumentos X_train y y_train. El proceso de entrenamiento se lleva a cabo a lo largo de un número específico de épocas. Además, el parámetro batch_size especifica cuántas muestras del conjunto de entrenamiento se usarán en cada iteración de retropropagación (backpropagation).
El conjunto de datos de validación se utiliza para evaluar el rendimiento del modelo al finalizar cada época. Se emplea un objeto ModelCheckpoint que monitorea la función de pérdida en el conjunto de validación y almacena el modelo correspondiente a la época en la que esta función alcanza su valor más bajo.
Se define val_loss como el valor de la función de coste para los datos de validación cruzada y loss como el valor de la función de coste para los datos de entrenamiento. period=1 indica que el modelo se evaluará y potencialmente se guardará después de cada época.
from tensorflow.keras.callbacks import ModelCheckpoint
# Checkpoint path template: the epoch number and validation loss are embedded
# in the filename by Keras at save time.
save_weights = os.path.join('keras_models', 'PRSA_data_at_LSTM_weights.{epoch:02d}-{val_loss:.4f}.keras')
# Save the full model (not just weights) after each epoch, but only when the
# monitored val_loss improves on the best value seen so far (mode='min').
save_best7_lstm_at = ModelCheckpoint(save_weights, monitor='val_loss', verbose=2,
                                     save_best_only=True, save_weights_only=False, mode='min', save_freq='epoch');
A continuación se itera sobre cada uno de los modelos del objeto models_LSTM7.
import os
from joblib import dump, load

# Train each LSTM model, or load its cached training history if one exists.
# BUG FIX: the log messages used a literal placeholder instead of interpolating
# the `filename` variable, so they never said which history file was involved.
history_at_LSTM7 = []
for i, model in enumerate(models_LSTM7_at):
    filename = f'history_at_LSTM_model_{i}.joblib'
    if os.path.exists(filename):
        # Reuse the persisted history instead of retraining.
        model_history = load(filename)
        print(f"El archivo '{filename}' ya existe. Se ha cargado el historial del entrenamiento.")
    else:
        # Train for 20 epochs; the ModelCheckpoint callback keeps the best
        # epoch by validation loss.
        model_history = model.fit(x=X_train_at_lstm_7, y=y_train_at7, batch_size=16, epochs=20,
                                  verbose=2, callbacks=[save_best7_lstm_at],
                                  validation_data=(X_val_at_lstm_7, y_val_at7),
                                  shuffle=True)
        # Persist only the history dict (the Keras History object is not serializable).
        dump(model_history.history, filename)
        print(f"El entrenamiento del modelo {i + 1} se ha completado y el historial ha sido guardado en '{filename}'.")
    # Loaded histories are already dicts; fresh History objects expose .history.
    history_at_LSTM7.append(model_history if isinstance(model_history, dict) else model_history.history)
El archivo 'history_at_LSTM_model_0.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_at_LSTM_model_1.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_at_LSTM_model_2.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_at_LSTM_model_3.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_at_LSTM_model_4.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_at_LSTM_model_5.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_at_LSTM_model_6.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_at_LSTM_model_7.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_at_LSTM_model_8.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_at_LSTM_model_9.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_at_LSTM_model_10.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_at_LSTM_model_11.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_at_LSTM_model_12.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_at_LSTM_model_13.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_at_LSTM_model_14.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_at_LSTM_model_15.joblib' ya existe. Se ha cargado el historial del entrenamiento.
En este caso, el parámetro verbose=2 indica que se mostrará una línea por cada época durante el entrenamiento. Los valores posibles son: 0 para no mostrar información, 1 para visualizar una barra de progreso y 2 para ver un registro por época.
Además, las muestras se mezclan de manera aleatoria antes de cada época, gracias a la opción shuffle=True.
Una vez completado el entrenamiento, se realizan predicciones sobre el Precio del Bitcoin utilizando los mejores modelos guardados. Las predicciones generadas, que corresponden al Precio del Bitcoin escalado, son transformadas inversamente para recuperar los valores originales. Asimismo, se evalúa la calidad del ajuste mediante el uso de métricas de medición como SEE, MAPE, MAD, MSD y R2.
import os
import re
from tensorflow.keras.models import load_model
import numpy as np

# Scan the checkpoint directory for saved LSTM models and select the one whose
# filename records the lowest validation loss.
model_dir = 'keras_models'
files = os.listdir(model_dir)
pattern = r"PRSA_data_at_LSTM_weights\.(\d+)-([\d\.]+)\.keras"

best_val_loss = float('inf')
best_model_file = None
best7_lstm_at = None

# Walk every file; only names matching the checkpoint pattern are considered.
for file in files:
    match = re.match(pattern, file)
    if match is None:
        continue
    epoch = int(match.group(1))              # epoch number embedded in the name
    candidate_loss = float(match.group(2))   # val_loss embedded in the name
    # Keep the first file achieving the lowest val_loss seen so far.
    if candidate_loss < best_val_loss:
        best_val_loss, best_model_file = candidate_loss, file

if best_model_file:
    best_model_path = os.path.join(model_dir, best_model_file)
    print(f"Cargando el mejor modelo: {best_model_file} con val_loss: {best_val_loss}")
    best7_lstm_at = load_model(best_model_path)  # load the winning checkpoint
    if best7_lstm_at is None:
        print("Error: No se pudo cargar el mejor modelo.")
else:
    print("No se encontraron archivos de modelos que coincidan con el patrón.")
Cargando el mejor modelo: PRSA_data_at_LSTM_weights.19-0.2184.keras con val_loss: 0.2184
A continuación estimamos las predicciones del modelo LSTM para la época en donde la función de pérdida alcanza su valor más bajo.
# Show every checkpoint file found in the models directory (for inspection).
print("Archivos en el directorio 'keras_models':", files)
Archivos en el directorio 'keras_models': ['PRSA_data_vola14_MLP7_weights.04-0.0018.keras', 'PRSA_data_price_MLP21_weights.01-3579.4128.keras', 'PRSA_data_price_LSTM21_weights.01-43062.3047.keras', 'PRSA_data_price_LSTM28_weights.02-42892.2305.keras', 'PRSA_data_price_LSTM_weights.19-67016.3047.keras', 'PRSA_data_vola14_MLP21_weights.20-0.0011.keras', 'PRSA_data_price_LSTM14_weights.06-51394.8750.keras', 'PRSA_data_vola21_LSTM7_weights.04-0.0012.keras', 'PRSA_data_vola7_MLP28_weights.07-0.0033.keras', 'PRSA_data_price_LSTM21_weights.02-43052.0469.keras', 'PRSA_data_at_MLP7_weights.80-0.0780.keras', 'PRSA_data_vola21_LSTM28_weights.02-0.0022.keras', 'PRSA_data_vola21_LSTM14_weights.04-0.0011.keras', 'PRSA_data_vola7_LSTM21_weights.18-0.0029.keras', 'PRSA_data_vola21_LSTM7_weights.08-0.0013.keras', 'PRSA_data_vola28_MLP14_weights.04-0.0013.keras', 'PRSA_data_price_MLP14_weights.52-626.2623.keras', 'PRSA_data_vola7_MLP7_weights.15-0.0041.keras', 'PRSA_data_price_LSTM14_weights.13-51325.8516.keras', 'PRSA_data_vola14_MLP7_weights.02-0.0023.keras', 'PRSA_data_price_LSTM21_weights.11-42962.6562.keras', 'PRSA_data_vola7_LSTM28_weights.10-0.0037.keras', 'PRSA_data_at_LSTM_weights.04-1.2071.keras', 'PRSA_data_price_MLP7_weights.50-1342.5273.keras', 'PRSA_data_price_MLP14_weights.03-5108.7837.keras', 'PRSA_data_vola28_MLP7_weights.14-0.0012.keras', 'PRSA_data_at_MLP7_weights.11-2.4883.keras', 'PRSA_data_price_LSTM28_weights.08-42832.5430.keras', 'PRSA_data_price_MLP28_weights.01-9242.6553.keras', 'PRSA_data_vola21_LSTM28_weights.04-0.0019.keras', 'PRSA_data_price_MLP21_weights.16-1013.2251.keras', 'PRSA_data_price_MLP28_weights.13-1042.5178.keras', 'PRSA_data_vola28_MLP28_weights.34-0.0006.keras', 'PRSA_data_at_MLP7_weights.01-19.6635.keras', 'PRSA_data_at_MLP7_weights.19-1.8379.keras', 'PRSA_data_at_MLP7_weights.12-2.3991.keras', 'PRSA_data_price_MLP7_weights.15-1346.3275.keras', 'PRSA_data_vola14_MLP28_weights.02-0.0024.keras', 
'PRSA_data_vola14_MLP28_weights.07-0.0017.keras', 'PRSA_data_at_MLP28_weights.05-0.0930.keras', 'PRSA_data_price_LSTM14_weights.20-51256.7266.keras', 'PRSA_data_vola21_LSTM21_weights.05-0.0013.keras', 'PRSA_data_price_MLP14_weights.12-1304.7009.keras', 'PRSA_data_vola7_LSTM14_weights.09-0.0044.keras', 'PRSA_data_vola28_MLP28_weights.50-0.0006.keras', 'PRSA_data_vola7_MLP14_weights.15-0.0020.keras', 'PRSA_data_price_MLP14_weights.08-1386.5365.keras', 'PRSA_data_price_LSTM21_weights.09-42982.2383.keras', 'PRSA_data_price_MLP28_weights.11-7366.8994.keras', 'PRSA_data_vola14_MLP7_weights.18-0.0015.keras', 'PRSA_data_price_MLP7_weights.14-1734.7238.keras', 'PRSA_data_price_LSTM14_weights.15-51306.2070.keras', 'PRSA_data_vola21_MLP28_weights.25-0.0010.keras', 'PRSA_data_at_LSTM14_weights.02-2.2175.keras', 'PRSA_data_vola28_LSTM7_weights.08-0.0014.keras', 'PRSA_data_price_MLP28_weights.10-1107.4047.keras', 'PRSA_data_vola28_MLP7_weights.01-0.0068.keras', 'PRSA_data_vola21_LSTM21_weights.03-0.0015.keras', 'PRSA_data_price_MLP7_weights.01-9253.3047.keras', 'PRSA_data_vola21_MLP14_weights.31-0.0007.keras', 'PRSA_data_price_LSTM28_weights.12-42793.6172.keras', 'PRSA_data_price_LSTM14_weights.09-51365.0000.keras', 'PRSA_data_vola28_MLP28_weights.42-0.0006.keras', 'PRSA_data_price_MLP7_weights.05-4792.5054.keras', 'PRSA_data_vola21_LSTM_weights.01-0.0024.keras', 'PRSA_data_price_LSTM28_weights.17-42745.0352.keras', 'PRSA_data_vola28_MLP28_weights.45-0.0006.keras', 'PRSA_data_vola28_MLP14_weights.08-0.0010.keras', 'PRSA_data_at_LSTM_weights.11-0.6498.keras', 'PRSA_data_vola28_MLP28_weights.39-0.0007.keras', 'PRSA_data_at_MLP21_weights.11-0.0326.keras', 'PRSA_data_price_LSTM_weights.20-67006.4141.keras', 'PRSA_data_vola28_MLP7_weights.06-0.0014.keras', 'PRSA_data_at_LSTM14_weights.06-0.8145.keras', 'PRSA_data_at_MLP28_weights.01-2.1902.keras', 'PRSA_data_price_MLP14_weights.46-814.1992.keras', '.DS_Store', 'PRSA_data_vola28_LSTM28_weights.12-0.0006.keras', 
'PRSA_data_price_LSTM14_weights.18-51276.5625.keras', 'PRSA_data_vola21_LSTM14_weights.01-0.0078.keras', 'PRSA_data_vola14_MLP21_weights.04-0.0015.keras', 'PRSA_data_vola21_LSTM7_weights.02-0.0012.keras', 'PRSA_data_vola21_LSTM14_weights.02-0.0011.keras', 'PRSA_data_at_MLP7_weights.09-2.9793.keras', 'PRSA_data_vola21_MLP14_weights.06-0.0007.keras', 'PRSA_data_vola14_MLP21_weights.78-0.0010.keras', 'PRSA_data_price_MLP14_weights.64-711.9107.keras', 'PRSA_data_vola14_MLP28_weights.31-0.0011.keras', 'PRSA_data_vola14_LSTM_weights.08-0.0020.keras', 'PRSA_data_vola14_LSTM21_weights.02-0.0018.keras', 'PRSA_data_vola14_LSTM28_weights.17-0.0014.keras', 'PRSA_data_vola14_MLP14_weights.29-0.0005.keras', 'PRSA_data_price_LSTM_weights.03-67175.7734.keras', 'PRSA_data_vola14_LSTM_weights.01-0.0068.keras', 'PRSA_data_price_LSTM28_weights.11-42803.3086.keras', 'PRSA_data_price_MLP28_weights.15-7107.3306.keras', 'PRSA_data_vola14_MLP21_weights.49-0.0010.keras', 'PRSA_data_vola28_MLP7_weights.07-0.0010.keras', 'PRSA_data_vola21_LSTM7_weights.02-0.0010.keras', 'PRSA_data_at_LSTM14_weights.15-0.4082.keras', 'PRSA_data_vola28_MLP7_weights.19-0.0011.keras', 'PRSA_data_vola14_MLP14_weights.06-0.0006.keras', 'PRSA_data_vola14_LSTM21_weights.06-0.0012.keras', 'PRSA_data_price_LSTM_weights.17-67036.2891.keras', 'PRSA_data_vola28_MLP7_weights.03-0.0018.keras', 'PRSA_data_vola7_MLP28_weights.02-0.0039.keras', 'PRSA_data_vola7_LSTM28_weights.16-0.0035.keras', 'PRSA_data_vola21_MLP7_weights.20-0.0011.keras', 'PRSA_data_vola21_LSTM7_weights.01-0.0018.keras', 'PRSA_data_vola21_MLP21_weights.33-0.0009.keras', 'PRSA_data_price_MLP21_weights.64-870.7020.keras', 'PRSA_data_vola21_MLP14_weights.22-0.0007.keras', 'PRSA_data_vola7_MLP14_weights.14-0.0020.keras', 'PRSA_data_price_LSTM21_weights.17-42903.9648.keras', 'PRSA_data_vola21_MLP14_weights.01-0.0021.keras', 'PRSA_data_vola21_LSTM7_weights.16-0.0011.keras', 'PRSA_data_at_LSTM14_weights.11-0.6656.keras', 
'PRSA_data_vola28_MLP7_weights.02-0.0019.keras', 'PRSA_data_vola14_MLP28_weights.05-0.0018.keras', 'PRSA_data_vola14_LSTM14_weights.08-0.0005.keras', 'PRSA_data_at_MLP21_weights.02-0.8171.keras', 'PRSA_data_price_MLP28_weights.02-8134.2202.keras', 'PRSA_data_price_LSTM28_weights.06-42852.3320.keras', 'PRSA_data_vola7_MLP14_weights.02-0.0036.keras', 'PRSA_data_vola14_LSTM28_weights.01-0.0026.keras', 'PRSA_data_at_MLP7_weights.04-9.8837.keras', 'PRSA_data_price_MLP28_weights.16-1274.4368.keras', 'PRSA_data_price_LSTM21_weights.07-43002.0078.keras', 'PRSA_data_vola7_MLP28_weights.23-0.0032.keras', 'PRSA_data_price_LSTM_weights.01-67196.3359.keras', 'PRSA_data_at_MLP28_weights.02-1.9156.keras', 'PRSA_data_vola14_MLP21_weights.02-0.0016.keras', 'PRSA_data_vola28_MLP14_weights.14-0.0007.keras', 'PRSA_data_price_LSTM28_weights.07-42842.4375.keras', 'PRSA_data_price_MLP14_weights.37-825.1805.keras', 'PRSA_data_vola7_MLP14_weights.07-0.0021.keras', 'PRSA_data_price_LSTM14_weights.04-51414.8945.keras', 'PRSA_data_price_MLP7_weights.42-1446.1339.keras', 'PRSA_data_price_MLP21_weights.57-932.6497.keras', 'PRSA_data_price_LSTM14_weights.19-51266.6133.keras', 'PRSA_data_vola21_MLP14_weights.34-0.0008.keras', 'PRSA_data_price_MLP14_weights.01-10485.8262.keras', 'PRSA_data_vola21_LSTM7_weights.08-0.0012.keras', 'PRSA_data_at_LSTM_weights.02-2.4966.keras', 'PRSA_data_vola21_LSTM28_weights.15-0.0009.keras', 'PRSA_data_price_LSTM_weights.20-67006.3047.keras', 'PRSA_data_price_LSTM_weights.14-67066.1016.keras', 'PRSA_data_price_LSTM21_weights.19-42884.2422.keras', 'PRSA_data_vola28_LSTM21_weights.18-0.0008.keras', 'PRSA_data_vola21_LSTM7_weights.04-0.0013.keras', 'PRSA_data_price_LSTM28_weights.20-42714.5508.keras', 'PRSA_data_at_LSTM14_weights.13-0.5609.keras', 'PRSA_data_vola28_LSTM28_weights.04-0.0009.keras', 'PRSA_data_vola14_MLP21_weights.01-0.0118.keras', 'PRSA_data_vola7_MLP21_weights.02-0.0032.keras', 'PRSA_data_at_MLP7_weights.74-0.2838.keras', 
'PRSA_data_price_MLP28_weights.17-1217.0461.keras', 'PRSA_data_at_MLP21_weights.16-1.1973.keras', 'PRSA_data_vola7_MLP7_weights.02-0.0036.keras', 'PRSA_data_vola21_LSTM7_weights.02-0.0015.keras', 'PRSA_data_vola14_LSTM14_weights.04-0.0006.keras', 'PRSA_data_vola28_LSTM28_weights.18-0.0006.keras', 'PRSA_data_price_MLP21_weights.74-805.9489.keras', 'PRSA_data_vola21_MLP14_weights.02-0.0011.keras', 'PRSA_data_vola28_MLP21_weights.02-0.0012.keras', 'PRSA_data_vola21_LSTM_weights.11-0.0014.keras', 'PRSA_data_vola7_LSTM14_weights.14-0.0036.keras', 'PRSA_data_at_MLP28_weights.03-1.7712.keras', 'PRSA_data_price_LSTM21_weights.20-42874.3555.keras', 'PRSA_data_vola28_LSTM7_weights.20-0.0010.keras', 'PRSA_data_vola28_MLP21_weights.04-0.0009.keras', 'PRSA_data_at_MLP7_weights.08-3.6716.keras', 'PRSA_data_at_MLP21_weights.04-0.0734.keras', 'PRSA_data_vola21_LSTM28_weights.03-0.0021.keras', 'PRSA_data_vola21_MLP14_weights.35-0.0008.keras', 'PRSA_data_vola28_LSTM7_weights.11-0.0012.keras', 'PRSA_data_price_LSTM28_weights.04-42872.1953.keras', 'PRSA_data_at_LSTM_weights.08-0.4803.keras', 'PRSA_data_vola28_MLP14_weights.05-0.0010.keras', 'PRSA_data_price_LSTM14_weights.14-51316.0625.keras', 'PRSA_data_vola7_MLP14_weights.01-0.0101.keras', 'PRSA_data_vola14_LSTM28_weights.17-0.0015.keras', 'PRSA_data_vola7_LSTM_weights.01-0.0040.keras', 'PRSA_data_vola14_MLP21_weights.09-0.0011.keras', 'PRSA_data_price_LSTM21_weights.05-43021.9570.keras', 'PRSA_data_vola28_MLP28_weights.03-0.0007.keras', 'PRSA_data_vola7_LSTM14_weights.10-0.0042.keras', 'PRSA_data_price_LSTM_weights.07-67135.3359.keras', 'PRSA_data_vola7_MLP14_weights.18-0.0020.keras', 'PRSA_data_price_LSTM21_weights.18-42894.1055.keras', 'PRSA_data_vola7_MLP7_weights.01-0.0042.keras', 'PRSA_data_vola14_LSTM_weights.05-0.0024.keras', 'PRSA_data_vola21_MLP14_weights.27-0.0008.keras', 'PRSA_data_vola28_MLP28_weights.02-0.0018.keras', 'PRSA_data_vola21_LSTM7_weights.03-0.0017.keras', 'PRSA_data_price_LSTM_weights.10-67105.4297.keras', 
'PRSA_data_vola28_MLP7_weights.19-0.0010.keras', 'PRSA_data_vola7_MLP21_weights.03-0.0030.keras', 'PRSA_data_at_MLP7_weights.06-5.3724.keras', 'PRSA_data_at_LSTM_weights.05-1.1226.keras', 'PRSA_data_vola21_LSTM7_weights.01-0.0026.keras', 'PRSA_data_vola21_LSTM28_weights.05-0.0018.keras', 'PRSA_data_vola14_MLP21_weights.05-0.0012.keras', 'PRSA_data_vola21_LSTM28_weights.14-0.0010.keras', 'PRSA_data_vola21_MLP14_weights.08-0.0010.keras', 'PRSA_data_vola21_MLP28_weights.47-0.0008.keras', 'PRSA_data_price_LSTM14_weights.16-51296.3633.keras', 'PRSA_data_at_MLP7_weights.02-16.3421.keras', 'PRSA_data_vola7_LSTM21_weights.15-0.0030.keras', 'PRSA_data_vola21_MLP21_weights.09-0.0009.keras', 'PRSA_data_at_LSTM14_weights.01-6.2168.keras', 'PRSA_data_vola28_LSTM21_weights.01-0.0016.keras', 'PRSA_data_vola21_LSTM21_weights.16-0.0009.keras', 'PRSA_data_vola21_LSTM28_weights.07-0.0016.keras', 'PRSA_data_vola21_MLP28_weights.19-0.0010.keras', 'PRSA_data_vola21_LSTM28_weights.14-0.0009.keras', 'PRSA_data_vola14_MLP7_weights.09-0.0018.keras', 'PRSA_data_vola21_MLP21_weights.02-0.0011.keras', 'PRSA_data_vola28_LSTM14_weights.03-0.0007.keras', 'PRSA_data_vola21_LSTM_weights.09-0.0015.keras', 'PRSA_data_vola21_LSTM7_weights.09-0.0012.keras', 'PRSA_data_vola28_MLP21_weights.65-0.0007.keras', 'PRSA_data_vola21_MLP28_weights.15-0.0011.keras', 'PRSA_data_at_MLP7_weights.42-0.8694.keras', 'PRSA_data_vola21_LSTM28_weights.18-0.0008.keras', 'PRSA_data_vola21_MLP14_weights.04-0.0008.keras', 'PRSA_data_vola7_LSTM14_weights.02-0.0040.keras', 'PRSA_data_vola21_LSTM14_weights.12-0.0007.keras', 'PRSA_data_vola7_LSTM21_weights.15-0.0029.keras', 'PRSA_data_vola21_MLP28_weights.01-0.0026.keras', 'PRSA_data_price_LSTM_weights.13-67075.9453.keras', 'PRSA_data_vola14_MLP7_weights.01-0.0041.keras', 'PRSA_data_price_MLP21_weights.02-1476.1034.keras', 'PRSA_data_vola14_LSTM28_weights.19-0.0014.keras', 'PRSA_data_price_MLP14_weights.96-663.0834.keras', 'PRSA_data_vola7_MLP21_weights.94-0.0024.keras', 
'PRSA_data_price_LSTM21_weights.03-43041.9570.keras', 'PRSA_data_price_MLP7_weights.12-3798.9531.keras', 'PRSA_data_price_LSTM28_weights.10-42813.0352.keras', 'PRSA_data_vola14_MLP28_weights.20-0.0014.keras', 'PRSA_data_vola7_MLP21_weights.04-0.0027.keras', 'PRSA_data_vola7_LSTM28_weights.18-0.0035.keras', 'PRSA_data_at_MLP7_weights.10-2.7632.keras', 'PRSA_data_price_LSTM_weights.06-67145.4141.keras', 'PRSA_data_price_LSTM28_weights.13-42783.9805.keras', 'PRSA_data_vola21_MLP7_weights.13-0.0012.keras', 'PRSA_data_price_LSTM28_weights.01-42902.4570.keras', 'PRSA_data_price_LSTM_weights.20-67005.9609.keras', 'PRSA_data_vola7_LSTM21_weights.04-0.0034.keras', 'PRSA_data_at_MLP14_weights.03-0.0461.keras', 'PRSA_data_vola28_MLP28_weights.01-0.0042.keras', 'PRSA_data_vola7_LSTM14_weights.01-0.0455.keras', 'PRSA_data_price_MLP28_weights.17-1182.9105.keras', 'PRSA_data_vola7_MLP14_weights.05-0.0018.keras', 'PRSA_data_vola28_MLP14_weights.38-0.0006.keras', 'PRSA_data_vola28_MLP7_weights.10-0.0013.keras', 'PRSA_data_vola7_LSTM21_weights.01-0.0038.keras', 'PRSA_data_price_LSTM14_weights.17-51286.4570.keras', 'PRSA_data_price_LSTM28_weights.16-42754.8008.keras', 'PRSA_data_price_LSTM14_weights.11-51345.4023.keras', 'PRSA_data_price_MLP14_weights.22-638.4409.keras', 'PRSA_data_vola7_MLP28_weights.32-0.0029.keras', 'PRSA_data_price_MLP7_weights.48-1648.1864.keras', 'PRSA_data_vola28_LSTM28_weights.10-0.0007.keras', 'PRSA_data_vola21_LSTM_weights.03-0.0021.keras', 'PRSA_data_vola21_LSTM21_weights.13-0.0010.keras', 'PRSA_data_at_MLP7_weights.07-4.2627.keras', 'PRSA_data_vola7_LSTM21_weights.17-0.0029.keras', 'PRSA_data_vola21_LSTM28_weights.08-0.0011.keras', 'PRSA_data_price_MLP21_weights.05-1072.5081.keras', 'PRSA_data_vola7_LSTM21_weights.05-0.0030.keras', 'PRSA_data_price_MLP14_weights.29-856.8320.keras', 'PRSA_data_vola14_LSTM21_weights.11-0.0011.keras', 'PRSA_data_price_LSTM21_weights.10-42972.4219.keras', 'PRSA_data_price_LSTM_weights.02-67185.9453.keras', 
'PRSA_data_vola28_MLP14_weights.21-0.0007.keras', 'PRSA_data_vola21_LSTM28_weights.16-0.0009.keras', 'PRSA_data_vola28_LSTM21_weights.10-0.0009.keras', 'PRSA_data_price_LSTM28_weights.15-42764.5898.keras', 'PRSA_data_at_LSTM14_weights.07-0.7059.keras', 'PRSA_data_vola21_LSTM21_weights.01-0.0048.keras', 'PRSA_data_at_LSTM14_weights.03-1.3422.keras', 'PRSA_data_vola14_LSTM14_weights.02-0.0015.keras', 'PRSA_data_vola14_MLP14_weights.02-0.0007.keras', 'PRSA_data_vola7_LSTM21_weights.02-0.0034.keras', 'PRSA_data_vola7_MLP21_weights.21-0.0025.keras', 'PRSA_data_at_MLP7_weights.18-1.8396.keras', 'PRSA_data_price_LSTM_weights.11-67095.6172.keras', 'PRSA_data_at_LSTM14_weights.12-0.6378.keras', 'PRSA_data_vola21_LSTM21_weights.07-0.0012.keras', 'PRSA_data_vola7_MLP28_weights.45-0.0030.keras', 'PRSA_data_at_MLP7_weights.26-0.8897.keras', 'PRSA_data_vola21_MLP7_weights.01-0.0014.keras', 'PRSA_data_price_MLP14_weights.19-982.5739.keras', 'PRSA_data_vola14_LSTM_weights.16-0.0017.keras', 'PRSA_data_price_MLP21_weights.94-813.8954.keras', 'PRSA_data_vola7_LSTM21_weights.20-0.0029.keras', 'PRSA_data_at_LSTM_weights.03-1.4842.keras', 'PRSA_data_vola21_MLP28_weights.02-0.0015.keras', 'PRSA_data_price_LSTM21_weights.12-42952.9453.keras', 'PRSA_data_price_MLP28_weights.17-1019.2816.keras', 'PRSA_data_vola14_LSTM_weights.13-0.0019.keras', 'PRSA_data_vola7_LSTM_weights.02-0.0038.keras', 'PRSA_data_vola7_LSTM28_weights.02-0.0042.keras', 'PRSA_data_at_LSTM14_weights.13-0.2270.keras', 'PRSA_data_price_LSTM_weights.18-67026.2891.keras', 'PRSA_data_vola28_MLP21_weights.66-0.0007.keras', 'PRSA_data_at_MLP14_weights.42-0.0373.keras', 'PRSA_data_vola7_MLP14_weights.38-0.0020.keras', 'PRSA_data_vola7_LSTM21_weights.11-0.0029.keras', 'PRSA_data_vola21_LSTM21_weights.12-0.0010.keras', 'PRSA_data_at_LSTM_weights.14-0.5158.keras', 'PRSA_data_vola14_LSTM21_weights.01-0.0019.keras', 'PRSA_data_vola28_LSTM21_weights.11-0.0009.keras', 'PRSA_data_vola7_LSTM28_weights.05-0.0038.keras', 
'PRSA_data_vola14_MLP14_weights.10-0.0005.keras', 'PRSA_data_at_MLP21_weights.03-2.1540.keras', 'PRSA_data_at_LSTM_weights.16-0.5377.keras', 'PRSA_data_vola21_LSTM7_weights.13-0.0013.keras', 'PRSA_data_vola21_MLP28_weights.44-0.0008.keras', 'PRSA_data_at_MLP7_weights.16-1.9941.keras', 'PRSA_data_vola28_LSTM28_weights.01-0.0010.keras', 'PRSA_data_vola28_LSTM14_weights.19-0.0006.keras', 'PRSA_data_price_LSTM21_weights.14-42933.4180.keras', 'PRSA_data_vola14_LSTM28_weights.04-0.0019.keras', 'PRSA_data_price_MLP21_weights.82-828.7976.keras', 'PRSA_data_vola28_MLP21_weights.01-0.0030.keras', 'PRSA_data_price_LSTM14_weights.10-51355.2070.keras', 'PRSA_data_vola21_MLP21_weights.18-0.0010.keras', 'PRSA_data_vola28_MLP21_weights.73-0.0007.keras', 'PRSA_data_vola28_MLP7_weights.20-0.0011.keras', 'PRSA_data_price_LSTM21_weights.13-42943.1719.keras', 'PRSA_data_vola28_MLP28_weights.36-0.0007.keras', 'PRSA_data_vola7_MLP14_weights.04-0.0025.keras', 'PRSA_data_at_MLP14_weights.02-0.2715.keras', 'PRSA_data_price_LSTM14_weights.05-51404.8945.keras', 'PRSA_data_price_LSTM28_weights.19-42725.4297.keras', 'PRSA_data_price_MLP28_weights.01-1561.6656.keras', 'PRSA_data_vola21_MLP7_weights.14-0.0011.keras', 'PRSA_data_vola7_MLP7_weights.23-0.0033.keras', 'PRSA_data_vola14_MLP28_weights.01-0.0026.keras', 'PRSA_data_at_MLP14_weights.29-0.0323.keras', 'PRSA_data_vola7_MLP21_weights.01-0.0033.keras', 'PRSA_data_vola21_LSTM_weights.19-0.0014.keras', 'PRSA_data_price_MLP7_weights.03-5953.8076.keras', 'PRSA_data_price_MLP21_weights.42-948.6628.keras', 'PRSA_data_vola21_LSTM14_weights.07-0.0008.keras', 'PRSA_data_vola14_LSTM28_weights.05-0.0018.keras', 'PRSA_data_vola7_LSTM28_weights.13-0.0037.keras', 'PRSA_data_vola21_MLP21_weights.01-0.0016.keras', 'PRSA_data_vola14_LSTM21_weights.08-0.0011.keras', 'PRSA_data_price_LSTM21_weights.06-43011.9570.keras', 'PRSA_data_vola7_MLP14_weights.05-0.0019.keras', 'PRSA_data_vola7_MLP28_weights.16-0.0033.keras', 
'PRSA_data_vola21_LSTM28_weights.16-0.0008.keras', 'PRSA_data_vola28_MLP14_weights.01-0.0031.keras', 'PRSA_data_vola14_MLP28_weights.11-0.0015.keras', 'PRSA_data_price_LSTM14_weights.03-51425.0000.keras', 'PRSA_data_vola7_LSTM28_weights.04-0.0039.keras', 'PRSA_data_vola28_LSTM7_weights.13-0.0011.keras', 'PRSA_data_price_LSTM14_weights.01-51445.4883.keras', 'PRSA_data_vola28_MLP28_weights.22-0.0006.keras', 'PRSA_data_price_LSTM_weights.09-67115.3359.keras', 'PRSA_data_vola28_LSTM7_weights.14-0.0011.keras', 'PRSA_data_vola7_LSTM14_weights.04-0.0050.keras', 'PRSA_data_vola21_MLP28_weights.17-0.0010.keras', 'PRSA_data_vola28_LSTM21_weights.09-0.0010.keras', 'PRSA_data_vola21_MLP14_weights.13-0.0010.keras', 'PRSA_data_vola14_MLP14_weights.01-0.0013.keras', 'PRSA_data_vola21_MLP28_weights.50-0.0009.keras', 'PRSA_data_at_LSTM_weights.19-0.2184.keras', 'PRSA_data_vola14_LSTM_weights.02-0.0030.keras', 'PRSA_data_vola7_MLP28_weights.39-0.0030.keras', 'PRSA_data_price_LSTM21_weights.04-43031.9414.keras', 'PRSA_data_price_LSTM_weights.15-67056.2109.keras', 'PRSA_data_at_MLP7_weights.20-1.6814.keras', 'PRSA_data_vola28_LSTM7_weights.18-0.0010.keras', 'PRSA_data_at_MLP7_weights.95-0.2130.keras', 'PRSA_data_vola14_LSTM28_weights.03-0.0021.keras', 'PRSA_data_at_LSTM_weights.01-7.5653.keras', 'PRSA_data_at_MLP7_weights.05-7.2110.keras', 'PRSA_data_vola21_LSTM28_weights.20-0.0008.keras', 'PRSA_data_at_LSTM_weights.07-0.8569.keras', 'PRSA_data_vola14_LSTM28_weights.12-0.0014.keras', 'PRSA_data_price_LSTM21_weights.16-42913.7969.keras', 'PRSA_data_at_MLP28_weights.04-1.3772.keras', 'PRSA_data_price_LSTM_weights.08-67125.3047.keras', 'PRSA_data_price_LSTM14_weights.07-51384.8633.keras', 'PRSA_data_price_LSTM_weights.04-67165.6016.keras', 'PRSA_data_vola14_LSTM28_weights.15-0.0014.keras', 'PRSA_data_at_MLP14_weights.01-0.4195.keras', 'PRSA_data_price_LSTM28_weights.09-42822.7578.keras', 'PRSA_data_price_LSTM28_weights.03-42882.1953.keras', 'PRSA_data_at_LSTM14_weights.05-0.8408.keras', 
'PRSA_data_vola7_MLP28_weights.08-0.0030.keras', 'PRSA_data_price_MLP14_weights.07-3004.8474.keras', 'PRSA_data_price_LSTM21_weights.15-42923.6367.keras', 'PRSA_data_vola7_MLP21_weights.81-0.0025.keras', 'PRSA_data_price_MLP21_weights.86-819.7106.keras', 'PRSA_data_vola28_MLP7_weights.10-0.0010.keras', 'PRSA_data_vola21_MLP28_weights.05-0.0012.keras', 'PRSA_data_vola21_MLP14_weights.20-0.0007.keras', 'PRSA_data_vola14_MLP28_weights.42-0.0011.keras', 'PRSA_data_price_LSTM_weights.12-67085.7578.keras', 'PRSA_data_vola28_MLP21_weights.13-0.0008.keras', 'PRSA_data_vola14_MLP28_weights.08-0.0016.keras', 'PRSA_data_at_MLP7_weights.86-0.0883.keras', 'PRSA_data_vola28_LSTM14_weights.01-0.0020.keras', 'PRSA_data_vola21_MLP21_weights.71-0.0010.keras', 'PRSA_data_at_MLP28_weights.09-0.1537.keras', 'PRSA_data_vola7_MLP28_weights.48-0.0030.keras', 'PRSA_data_at_MLP28_weights.12-0.0354.keras', 'PRSA_data_at_LSTM14_weights.04-0.9732.keras', 'PRSA_data_at_LSTM14_weights.15-0.2154.keras', 'PRSA_data_vola14_MLP28_weights.21-0.0013.keras', 'PRSA_data_vola21_MLP14_weights.25-0.0009.keras', 'PRSA_data_vola14_LSTM14_weights.01-0.0027.keras', 'PRSA_data_vola21_LSTM28_weights.01-0.0039.keras', 'PRSA_data_price_MLP14_weights.02-8059.2510.keras', 'PRSA_data_vola21_MLP7_weights.13-0.0013.keras', 'PRSA_data_vola28_MLP7_weights.04-0.0016.keras', 'PRSA_data_vola21_LSTM21_weights.02-0.0043.keras', 'PRSA_data_price_MLP14_weights.78-683.9230.keras', 'PRSA_data_price_MLP21_weights.19-1002.5925.keras', 'PRSA_data_vola28_MLP21_weights.92-0.0007.keras', 'PRSA_data_price_LSTM28_weights.14-42774.2500.keras', 'PRSA_data_vola21_MLP21_weights.06-0.0010.keras', 'PRSA_data_vola14_LSTM14_weights.03-0.0010.keras', 'PRSA_data_price_LSTM_weights.16-67046.2422.keras', 'PRSA_data_vola7_LSTM28_weights.07-0.0037.keras', 'PRSA_data_vola28_LSTM7_weights.01-0.0017.keras', 'PRSA_data_vola28_LSTM14_weights.02-0.0008.keras', 'PRSA_data_vola7_MLP14_weights.05-0.0022.keras', 'PRSA_data_at_MLP21_weights.03-0.2039.keras', 
'PRSA_data_vola14_LSTM28_weights.02-0.0023.keras', 'PRSA_data_at_MLP7_weights.03-13.2212.keras', 'PRSA_data_vola7_LSTM14_weights.04-0.0037.keras', 'PRSA_data_vola14_MLP21_weights.69-0.0010.keras', 'PRSA_data_vola21_LSTM7_weights.18-0.0013.keras', 'PRSA_data_vola7_MLP21_weights.49-0.0025.keras', 'PRSA_data_vola28_MLP28_weights.31-0.0006.keras', 'PRSA_data_price_MLP21_weights.77-841.5739.keras', 'PRSA_data_vola28_MLP14_weights.35-0.0006.keras', 'PRSA_data_vola7_MLP28_weights.26-0.0031.keras', 'PRSA_data_vola7_LSTM14_weights.01-0.0065.keras', 'PRSA_data_price_LSTM14_weights.02-51435.1133.keras', 'PRSA_data_price_LSTM28_weights.20-42714.7695.keras', 'PRSA_data_vola28_LSTM28_weights.16-0.0006.keras', 'PRSA_data_vola14_LSTM28_weights.06-0.0016.keras', 'PRSA_data_vola21_MLP21_weights.13-0.0010.keras', 'PRSA_data_vola28_MLP14_weights.32-0.0006.keras', 'PRSA_data_vola7_MLP28_weights.01-0.0043.keras', 'PRSA_data_vola21_MLP28_weights.08-0.0011.keras', 'PRSA_data_vola21_LSTM28_weights.10-0.0011.keras', 'PRSA_data_vola21_LSTM7_weights.14-0.0012.keras', 'PRSA_data_vola7_LSTM28_weights.01-0.0049.keras', 'PRSA_data_price_LSTM28_weights.20-42715.6250.keras', 'PRSA_data_vola21_LSTM7_weights.13-0.0012.keras', 'PRSA_data_price_LSTM_weights.05-67155.5078.keras', 'PRSA_data_price_MLP14_weights.14-1073.6493.keras', 'PRSA_data_vola14_LSTM_weights.03-0.0029.keras', 'PRSA_data_vola14_MLP7_weights.07-0.0021.keras', 'PRSA_data_at_MLP28_weights.03-0.0637.keras', 'PRSA_data_vola21_MLP28_weights.03-0.0012.keras', 'PRSA_data_at_MLP28_weights.05-0.1823.keras', 'PRSA_data_at_MLP21_weights.01-0.9762.keras', 'PRSA_data_vola7_LSTM28_weights.15-0.0035.keras', 'PRSA_data_price_LSTM21_weights.08-42992.0703.keras', 'PRSA_data_vola21_MLP21_weights.22-0.0010.keras', 'PRSA_data_vola28_LSTM28_weights.20-0.0006.keras', 'PRSA_data_at_MLP21_weights.01-4.3241.keras', 'PRSA_data_at_LSTM_weights.06-0.8671.keras', 'PRSA_data_price_MLP7_weights.37-1654.4709.keras', 'PRSA_data_at_LSTM_weights.20-0.4626.keras', 
'PRSA_data_vola21_MLP28_weights.27-0.0009.keras', 'PRSA_data_at_LSTM_weights.19-0.4729.keras', 'PRSA_data_at_MLP7_weights.24-1.4718.keras', 'PRSA_data_at_MLP7_weights.59-0.4636.keras', 'PRSA_data_vola14_LSTM14_weights.06-0.0005.keras', 'PRSA_data_at_MLP7_weights.14-2.2322.keras', 'PRSA_data_vola7_LSTM14_weights.02-0.0196.keras', 'PRSA_data_at_LSTM14_weights.19-0.3356.keras', 'PRSA_data_price_LSTM28_weights.18-42735.2305.keras', 'PRSA_data_price_LSTM14_weights.12-51335.6523.keras', 'PRSA_data_vola21_LSTM_weights.06-0.0017.keras', 'PRSA_data_price_LSTM28_weights.05-42862.2383.keras', 'PRSA_data_price_MLP7_weights.10-1248.9592.keras', 'PRSA_data_vola21_MLP28_weights.35-0.0009.keras', 'PRSA_data_vola28_LSTM21_weights.03-0.0013.keras', 'PRSA_data_price_MLP14_weights.04-5028.7607.keras', 'PRSA_data_price_LSTM14_weights.08-51374.8945.keras', 'PRSA_data_vola28_MLP14_weights.48-0.0007.keras']
# Predictions with the best loaded LSTM model (7-day horizon).
if best7_lstm_at is None:
    print("No se puede realizar predicciones porque el mejor modelo no se ha cargado.")
else:
    # Predict on the train/validation/test windows in that order, squeezing
    # the trailing singleton dimension so each result is a flat array.
    # NOTE(review): assumes X_*_at_lstm_7 already have the 3D shape the
    # model expects — prepared upstream by change_dimension_lstm.
    splits = (X_train_at_lstm_7, X_val_at_lstm_7, X_test_at_lstm_7)
    train_preds_at_LSTM7, val_preds_at_LSTM7, test_preds_at_LSTM7 = (
        np.squeeze(best7_lstm_at.predict(block)) for block in splits
    )
    # Echo the predictions for a quick sanity check.
    print("Predicciones de Entrenamiento:", train_preds_at_LSTM7)
    print("Predicciones de validación:", val_preds_at_LSTM7)
    print("Predicciones de prueba:", test_preds_at_LSTM7)
156/156 [==============================] - 0s 1ms/step
1/1 [==============================] - 0s 9ms/step
1/1 [==============================] - 0s 8ms/step
Predicciones de Entrenamiento: [ 0.04042768 0.04042768 0.04042768 ... 23.44415 23.453379
23.461021 ]
Predicciones de validación: [23.46674 23.472754 23.475786 23.478655 23.48197 23.484907 23.487526]
Predicciones de prueba: [23.489784 23.492926 23.49611 23.499208 23.501436 23.502111 23.5003 ]
# Plot the last 100 training points plus validation/test series against the
# LSTM predictions — presumably an actual-vs-predicted overlay; see plot_model.
plot_model(data_train_plot_at7[-100:], data_val_plot_at7, data_test_plot_at7, val_preds_at_LSTM7, test_preds_at_LSTM7, "Predicciones usando Memoria a Corto y Largo Paso (LSTM) para un horizonte de 7 días")
A continuación realizamos un análisis sobre los residuales y el rendimiento de nuestro modelo con relación a nuestro conjunto de Entrenamiento (rendimiento) y de Prueba (rendimiento y residuales).
Conjunto de datos de Entrenamiento
A continuación se realiza el análisis de los residuales para el conjunto de entrenamiento.
# Residual diagnostics on the training fit; per the printed output below the
# helper reports Ljung-Box (autocorrelation) and Jarque-Bera (normality) p-values.
ljung_box_pval_LSTM_train7_at, jarque_bera_pval_LSTM_train7_at = diagnostic_plots(y_train_at7, train_preds_at_LSTM7)
Ljung-Box LB Statistic: 4453.887754
Ljung-Box p-value: 0.000000
Se rechaza H0: hay autocorrelación en los residuales.
Jarque-Bera p-value: 0.000000
Se rechaza H0: los residuales no siguen una distribución normal.
# Fit metrics (SSE, MAPE, MAD, MSD, R2) on the training split.
metrica_at_LSTM_train = metricas(y_train_at7, train_preds_at_LSTM7)
# Label the single row descriptively instead of keeping the default 0 index.
metrica_at_LSTM_train = metrica_at_LSTM_train.rename(
    index={0: 'LSTM Entrenamiento Retorno Acumulado τ = 7'})
# Attach the residual-diagnostic p-values computed above as extra columns.
for col_name, pval in (('Ljung-Box p-value', ljung_box_pval_LSTM_train7_at),
                       ('Jarque-Bera p-value', jarque_bera_pval_LSTM_train7_at)):
    metrica_at_LSTM_train[col_name] = pd.Series([pval], index=metrica_at_LSTM_train.index)
metrica_at_LSTM_train
| SSE | MAPE | MAD | MSD | R2 | Ljung-Box p-value | Jarque-Bera p-value | |
|---|---|---|---|---|---|---|---|
| LSTM Entrenamiento Retorno Acumulado τ = 7 | 733.882 | 2.18% | 0.34 | 0.15 | 99.6% | 0.0 | 0.0 |
Conjunto de datos de Prueba:
# Same residual diagnostics, now on the test split; the helper returns the
# Ljung-Box and Jarque-Bera p-values used in the metrics table below.
ljung_box_pvalLSTM7_at, jarque_bera_pvalLSTM7_at = evaluate_residuals(data_test_plot_at7, test_preds_at_LSTM7)
No se rechaza H0: los residuales son independientes (no correlacionados).
No se rechaza H0: los residuales siguen una distribución normal.
# Fit metrics (SSE, MAPE, MAD, MSD, R2) on the test split.
metrica_LSTM_test_at = metricas(y_test_at7, test_preds_at_LSTM7)
# Replace the default 0 index with a descriptive row label.
metrica_LSTM_test_at = metrica_LSTM_test_at.rename(
    index={0: 'LSTM Prueba Retorno Acumulado τ = 7'})
# Append the residual-diagnostic p-values as extra columns.
for col_name, pval in (('Ljung-Box p-value', ljung_box_pvalLSTM7_at),
                       ('Jarque-Bera p-value', jarque_bera_pvalLSTM7_at)):
    metrica_LSTM_test_at[col_name] = pd.Series([pval], index=metrica_LSTM_test_at.index)
metrica_LSTM_test_at
| SSE | MAPE | MAD | MSD | R2 | Ljung-Box p-value | Jarque-Bera p-value | |
|---|---|---|---|---|---|---|---|
| LSTM Prueba Retorno Acumulado τ = 7 | 0.4299 | 1.03% | 0.24 | 0.06 | -5043.45% | 0.1001 | 0.7179 |
Curva Runs vs Error/Score :
# Plot the training history (error/score per epoch) for the best LSTM run.
plot_best_model_validation_loss(history_at_LSTM7)
Boxplot Errores Conjunto de Entrenamiento, Validación y Prueba :
# Boxplots of prediction errors for the train, validation and test splits.
errores_plots(y_train_at7, train_preds_at_LSTM7, y_val_at7, val_preds_at_LSTM7, y_test_at7, test_preds_at_LSTM7)
De acuerdo a los resultados obtenidos del gráfico de Run vs Error/Score y al compararlo con el gráfico boxplot de los residuales se observa lo siguiente:
Conjunto de Entrenamiento: La media de los residuales se encuentra cerca del error/score correspondiente al val_loss de la época del mejor modelo; además muestra un sesgo considerable a la derecha y una alta variabilidad.
Conjunto de Validación: La media de los residuales se encuentra relativamente cercana al error/score correspondiente al val_loss de la época del mejor modelo.
Conjunto de Prueba: La media de los residuales se encuentra cercana al error/score correspondiente al val_loss de la época del mejor modelo.
Lo anterior es coherente con las métricas obtenidas para el modelo implementado.
Horizonte de 14 días (\(\tau=14\))#
Indexación de parámetros para cambio de dimensión de X de 2D a 3D.
# Reshape the 2D feature matrices to the 3D (samples, timesteps, features)
# layout LSTMs expect — printed shapes below confirm (n, 14, 1).
X_train_at_lstm_14, X_val_at_lstm_14, X_test_at_lstm_14 = change_dimension_lstm(X_train_at14, X_val_at14, X_test_at14)
Shape of 3D arrays X: (4943, 14, 1) (14, 14, 1) (14, 14, 1)
A continuación, se lleva a cabo la búsqueda de los hiperparámetros que optimizan nuestro modelo utilizando Keras y GridSearch.
from sklearn.metrics import make_scorer, mean_absolute_error
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # Esto ignorará todos los logs de info y warnings.
# Define the LSTM network architecture
def create_lstm_model(optimizer, activation):
    """Build and compile the two-layer LSTM regressor for the τ=14 horizon.

    Parameters
    ----------
    optimizer : str or keras Optimizer
        Passed straight to ``Model.compile`` (e.g. 'SGD', 'RMSprop', 'Adam').
    activation : str
        Activation for the recurrent layers. Bug fix: the original accepted
        this argument but never used it, so the grid search over activations
        had no effect on the model; it is now applied to both LSTM layers.

    Returns
    -------
    keras.Model
        Compiled model: input (14, 1), MAE loss, single linear output unit.
    """
    input_layer_lstm = Input(shape=(14, 1), dtype='float32')
    # Wire the searched activation into both recurrent layers (previously
    # the Keras default was always used regardless of the parameter).
    lstm_layer1 = LSTM(64, return_sequences=True, activation=activation)(input_layer_lstm)
    lstm_layer2 = LSTM(32, return_sequences=False, activation=activation)(lstm_layer1)
    dropout_layer_lstm = Dropout(0.2)(lstm_layer2)
    # Linear head: the model predicts one continuous value.
    output_layer_lstm = Dense(1, activation='linear')(dropout_layer_lstm)
    ts_model_lstm = Model(inputs=input_layer_lstm, outputs=output_layer_lstm)
    ts_model_lstm.compile(loss='mean_absolute_error', optimizer=optimizer)
    return ts_model_lstm
# Hyperparameter grid; the wider search space is kept in comments for reference.
param_grid = {'activation': ['relu'],  # activations to try: ['relu', 'tanh', 'sigmoid']
              'epochs': [20],  # [20, 50, 100, 150]
              'optimizer': ['SGD']  # ['SGD', 'RMSprop', 'Adam']
              }
# Grid-search configuration: 5-fold CV scored by mean absolute error.
# Bug fix: make_scorer defaults to greater_is_better=True, which made
# GridSearchCV MAXIMIZE the MAE and therefore select the WORST candidate.
# greater_is_better=False negates the score so lower MAE wins
# (best_score_ is consequently reported as a negated MAE).
scoring = make_scorer(mean_absolute_error, greater_is_better=False)
model = KerasRegressor(build_fn=create_lstm_model, epochs=10, batch_size=16, verbose=0)
grid = GridSearchCV(estimator=model, param_grid=param_grid, cv=KFold(n_splits=5, shuffle=True), scoring=scoring, n_jobs=-1, verbose=2)
grid_result = grid.fit(X_train_at_lstm_14, y_train_at14)
# Report the best hyperparameters found by the search.
print(f"Mejor función de activación: {grid_result.best_params_['activation']}")
print(f"Mejor número de epocas: {grid_result.best_params_['epochs']}")
print(f"Mejor Longitud de paso μ (optimizer): {grid_result.best_params_['optimizer']}")
print(f"Mejor Puntuación: {grid_result.best_score_}")
Fitting 5 folds for each of 1 candidates, totalling 5 fits
[CV] END ..........activation=relu, epochs=20, optimizer=SGD; total time= 27.1s
[CV] END ..........activation=relu, epochs=20, optimizer=SGD; total time= 27.5s
[CV] END ..........activation=relu, epochs=20, optimizer=SGD; total time= 27.8s
[CV] END ..........activation=relu, epochs=20, optimizer=SGD; total time= 27.7s
[CV] END ..........activation=relu, epochs=20, optimizer=SGD; total time= 27.9s
Mejor función de activación: relu
Mejor número de epocas: 20
Mejor Longitud de paso μ (optimizer): SGD
Mejor Puntuación: 1.0656000763271671
El modelo de predicción de series temporales se basa en una red neuronal recurrente que incluye una capa de entrada que se conecta a una capa LSTM. Esta capa LSTM considera 14 pasos temporales, equivalentes al número de datos históricos. La salida de la LSTM se genera únicamente a partir del último paso temporal. Cada uno de los n pasos temporales definidos en el shape (n,) cuenta con 32 neuronas ocultas.
La cantidad de pasos temporales (timesteps) y 1 indica el número de características (features) por cada paso temporal. Se utiliza return_sequences=True cuando es necesario enviar la secuencia completa de salidas a la capa siguiente, que puede ser otra capa recurrente, para este caso otra capa LSTM.
La salida de LSTM pasa a una capa de exclusión que elimina aleatoriamente el 20%, 40%, 60% y 80% de la entrada antes de pasar a la capa de salida, que tiene una única neurona oculta con una función de activación lineal
La indexación de parámetros de la función build_models_lstm con base en lo indicado en el numeral 3 del parcial práctico.
# Model-grid parameters per item 3 of the practical exam: every
# (neurons, dropout) combination yields one candidate LSTM model.
neurons_list = [10, 100, 1000, 10000]
dropout_rates = [0.2, 0.4, 0.6, 0.8]
input_shape14 = 14  # timesteps for the τ=14 horizon
models_LSTM14_at = build_models_lstm(input_shape14, neurons_list, dropout_rates, 'SGD')
Model: "model_222"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_223 (InputLayer) [(None, 14, 1)] 0
lstm_172 (LSTM) (None, 14, 64) 16896
lstm_173 (LSTM) (None, 32) 12416
dropout_222 (Dropout) (None, 32) 0
dense_630 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10 neuronas y dropout 0.2:
Model: "model_222"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_223 (InputLayer) [(None, 14, 1)] 0
lstm_172 (LSTM) (None, 14, 64) 16896
lstm_173 (LSTM) (None, 32) 12416
dropout_222 (Dropout) (None, 32) 0
dense_630 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_223"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_224 (InputLayer) [(None, 14, 1)] 0
lstm_174 (LSTM) (None, 14, 64) 16896
lstm_175 (LSTM) (None, 32) 12416
dropout_223 (Dropout) (None, 32) 0
dense_631 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10 neuronas y dropout 0.4:
Model: "model_223"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_224 (InputLayer) [(None, 14, 1)] 0
lstm_174 (LSTM) (None, 14, 64) 16896
lstm_175 (LSTM) (None, 32) 12416
dropout_223 (Dropout) (None, 32) 0
dense_631 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_224"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_225 (InputLayer) [(None, 14, 1)] 0
lstm_176 (LSTM) (None, 14, 64) 16896
lstm_177 (LSTM) (None, 32) 12416
dropout_224 (Dropout) (None, 32) 0
dense_632 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10 neuronas y dropout 0.6:
Model: "model_224"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_225 (InputLayer) [(None, 14, 1)] 0
lstm_176 (LSTM) (None, 14, 64) 16896
lstm_177 (LSTM) (None, 32) 12416
dropout_224 (Dropout) (None, 32) 0
dense_632 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_225"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_226 (InputLayer) [(None, 14, 1)] 0
lstm_178 (LSTM) (None, 14, 64) 16896
lstm_179 (LSTM) (None, 32) 12416
dropout_225 (Dropout) (None, 32) 0
dense_633 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10 neuronas y dropout 0.8:
Model: "model_225"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_226 (InputLayer) [(None, 14, 1)] 0
lstm_178 (LSTM) (None, 14, 64) 16896
lstm_179 (LSTM) (None, 32) 12416
dropout_225 (Dropout) (None, 32) 0
dense_633 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_226"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_227 (InputLayer) [(None, 14, 1)] 0
lstm_180 (LSTM) (None, 14, 64) 16896
lstm_181 (LSTM) (None, 32) 12416
dropout_226 (Dropout) (None, 32) 0
dense_634 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 100 neuronas y dropout 0.2:
Model: "model_226"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_227 (InputLayer) [(None, 14, 1)] 0
lstm_180 (LSTM) (None, 14, 64) 16896
lstm_181 (LSTM) (None, 32) 12416
dropout_226 (Dropout) (None, 32) 0
dense_634 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_227"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_228 (InputLayer) [(None, 14, 1)] 0
lstm_182 (LSTM) (None, 14, 64) 16896
lstm_183 (LSTM) (None, 32) 12416
dropout_227 (Dropout) (None, 32) 0
dense_635 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 100 neuronas y dropout 0.4:
Model: "model_227"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_228 (InputLayer) [(None, 14, 1)] 0
lstm_182 (LSTM) (None, 14, 64) 16896
lstm_183 (LSTM) (None, 32) 12416
dropout_227 (Dropout) (None, 32) 0
dense_635 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_228"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_229 (InputLayer) [(None, 14, 1)] 0
lstm_184 (LSTM) (None, 14, 64) 16896
lstm_185 (LSTM) (None, 32) 12416
dropout_228 (Dropout) (None, 32) 0
dense_636 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 100 neuronas y dropout 0.6:
Model: "model_228"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_229 (InputLayer) [(None, 14, 1)] 0
lstm_184 (LSTM) (None, 14, 64) 16896
lstm_185 (LSTM) (None, 32) 12416
dropout_228 (Dropout) (None, 32) 0
dense_636 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_229"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_230 (InputLayer) [(None, 14, 1)] 0
lstm_186 (LSTM) (None, 14, 64) 16896
lstm_187 (LSTM) (None, 32) 12416
dropout_229 (Dropout) (None, 32) 0
dense_637 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 100 neuronas y dropout 0.8:
Model: "model_229"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_230 (InputLayer) [(None, 14, 1)] 0
lstm_186 (LSTM) (None, 14, 64) 16896
lstm_187 (LSTM) (None, 32) 12416
dropout_229 (Dropout) (None, 32) 0
dense_637 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_230"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_231 (InputLayer) [(None, 14, 1)] 0
lstm_188 (LSTM) (None, 14, 64) 16896
lstm_189 (LSTM) (None, 32) 12416
dropout_230 (Dropout) (None, 32) 0
dense_638 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 1000 neuronas y dropout 0.2:
Model: "model_230"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_231 (InputLayer) [(None, 14, 1)] 0
lstm_188 (LSTM) (None, 14, 64) 16896
lstm_189 (LSTM) (None, 32) 12416
dropout_230 (Dropout) (None, 32) 0
dense_638 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_231"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_232 (InputLayer) [(None, 14, 1)] 0
lstm_190 (LSTM) (None, 14, 64) 16896
lstm_191 (LSTM) (None, 32) 12416
dropout_231 (Dropout) (None, 32) 0
dense_639 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 1000 neuronas y dropout 0.4:
Model: "model_231"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_232 (InputLayer) [(None, 14, 1)] 0
lstm_190 (LSTM) (None, 14, 64) 16896
lstm_191 (LSTM) (None, 32) 12416
dropout_231 (Dropout) (None, 32) 0
dense_639 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_232"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_233 (InputLayer) [(None, 14, 1)] 0
lstm_192 (LSTM) (None, 14, 64) 16896
lstm_193 (LSTM) (None, 32) 12416
dropout_232 (Dropout) (None, 32) 0
dense_640 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 1000 neuronas y dropout 0.6:
Model: "model_232"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_233 (InputLayer) [(None, 14, 1)] 0
lstm_192 (LSTM) (None, 14, 64) 16896
lstm_193 (LSTM) (None, 32) 12416
dropout_232 (Dropout) (None, 32) 0
dense_640 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_233"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_234 (InputLayer) [(None, 14, 1)] 0
lstm_194 (LSTM) (None, 14, 64) 16896
lstm_195 (LSTM) (None, 32) 12416
dropout_233 (Dropout) (None, 32) 0
dense_641 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 1000 neuronas y dropout 0.8:
Model: "model_233"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_234 (InputLayer) [(None, 14, 1)] 0
lstm_194 (LSTM) (None, 14, 64) 16896
lstm_195 (LSTM) (None, 32) 12416
dropout_233 (Dropout) (None, 32) 0
dense_641 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_234"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_235 (InputLayer) [(None, 14, 1)] 0
lstm_196 (LSTM) (None, 14, 64) 16896
lstm_197 (LSTM) (None, 32) 12416
dropout_234 (Dropout) (None, 32) 0
dense_642 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10000 neuronas y dropout 0.2:
Model: "model_234"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_235 (InputLayer) [(None, 14, 1)] 0
lstm_196 (LSTM) (None, 14, 64) 16896
lstm_197 (LSTM) (None, 32) 12416
dropout_234 (Dropout) (None, 32) 0
dense_642 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_235"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_236 (InputLayer) [(None, 14, 1)] 0
lstm_198 (LSTM) (None, 14, 64) 16896
lstm_199 (LSTM) (None, 32) 12416
dropout_235 (Dropout) (None, 32) 0
dense_643 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10000 neuronas y dropout 0.4:
Model: "model_235"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_236 (InputLayer) [(None, 14, 1)] 0
lstm_198 (LSTM) (None, 14, 64) 16896
lstm_199 (LSTM) (None, 32) 12416
dropout_235 (Dropout) (None, 32) 0
dense_643 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_236"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_237 (InputLayer) [(None, 14, 1)] 0
lstm_200 (LSTM) (None, 14, 64) 16896
lstm_201 (LSTM) (None, 32) 12416
dropout_236 (Dropout) (None, 32) 0
dense_644 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10000 neuronas y dropout 0.6:
Model: "model_236"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_237 (InputLayer) [(None, 14, 1)] 0
lstm_200 (LSTM) (None, 14, 64) 16896
lstm_201 (LSTM) (None, 32) 12416
dropout_236 (Dropout) (None, 32) 0
dense_644 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_237"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_238 (InputLayer) [(None, 14, 1)] 0
lstm_202 (LSTM) (None, 14, 64) 16896
lstm_203 (LSTM) (None, 32) 12416
dropout_237 (Dropout) (None, 32) 0
dense_645 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10000 neuronas y dropout 0.8:
Model: "model_237"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_238 (InputLayer) [(None, 14, 1)] 0
lstm_202 (LSTM) (None, 14, 64) 16896
lstm_203 (LSTM) (None, 32) 12416
dropout_237 (Dropout) (None, 32) 0
dense_645 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Para entrenar el modelo, se utiliza la función fit() en el objeto del modelo, proporcionándole como argumentos X_train y y_train. El proceso de entrenamiento se lleva a cabo a lo largo de un número específico de épocas. Además, el parámetro batch_size especifica cuántas muestras del conjunto de entrenamiento se usarán en cada iteración de retropropagación (backpropagation).
El conjunto de datos de validación se utiliza para evaluar el rendimiento del modelo al finalizar cada época. Se emplea un objeto ModelCheckpoint que monitorea la función de pérdida en el conjunto de validación y almacena el modelo correspondiente a la época en la que esta función alcanza su valor más bajo.
Se define val_loss como el valor de la función de coste para los datos de validación cruzada y loss como el valor de la función de coste para los datos de entrenamiento. period=1 indica que el modelo se evaluará y potencialmente se guardará después de cada época.
from tensorflow.keras.callbacks import ModelCheckpoint
# Checkpoint path template: epoch number and validation loss are embedded in
# the filename so the best epoch can later be recovered from the file list.
save_weights = os.path.join('keras_models', 'PRSA_data_at_LSTM14_weights.{epoch:02d}-{val_loss:.4f}.keras')
# Save the full model only when val_loss improves (mode='min'), checked once per epoch.
save_best14_lstm_at = ModelCheckpoint(save_weights, monitor='val_loss', verbose=2,
save_best_only=True, save_weights_only=False, mode='min', save_freq='epoch');
A continuación se itera sobre cada uno de los modelos del objeto models_LSTM14_at.
import os
from joblib import dump, load
history_at_LSTM14 = []
# Train (or reload) each candidate model; histories are cached to disk so
# re-running the notebook does not retrain from scratch.
for i, model in enumerate(models_LSTM14_at):
    filename = f'history_at_LSTM14_model_{i}.joblib'
    if os.path.exists(filename):
        # Cached run: the joblib file holds the history dict directly.
        model_history = load(filename)
        # Bug fix: the f-strings printed the literal '(unknown)' instead of
        # interpolating {filename} (see the captured output below for the
        # intended messages).
        print(f"El archivo '{filename}' ya existe. Se ha cargado el historial del entrenamiento.")
    else:
        model_history = model.fit(x=X_train_at_lstm_14, y=y_train_at14, batch_size=16, epochs=20,
                                  verbose=2, callbacks=[save_best14_lstm_at], validation_data=(X_val_at_lstm_14, y_val_at14),
                                  shuffle=True)
        # Persist only the history dict, not the full History object.
        dump(model_history.history, filename)
        print(f"El entrenamiento del modelo {i + 1} se ha completado y el historial ha sido guardado en '{filename}'.")
    # Normalize: store a plain dict whether loaded from cache or freshly trained.
    history_at_LSTM14.append(model_history if isinstance(model_history, dict) else model_history.history)
El archivo 'history_at_LSTM14_model_0.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_at_LSTM14_model_1.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_at_LSTM14_model_2.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_at_LSTM14_model_3.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_at_LSTM14_model_4.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_at_LSTM14_model_5.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_at_LSTM14_model_6.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_at_LSTM14_model_7.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_at_LSTM14_model_8.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_at_LSTM14_model_9.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_at_LSTM14_model_10.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_at_LSTM14_model_11.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_at_LSTM14_model_12.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_at_LSTM14_model_13.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_at_LSTM14_model_14.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_at_LSTM14_model_15.joblib' ya existe. Se ha cargado el historial del entrenamiento.
En este caso, el parámetro verbose=2 indica que se mostrará una línea por cada época durante el entrenamiento. Los valores posibles son: 0 para no mostrar información, 1 para visualizar una barra de progreso y 2 para ver un registro por época.
Además, las muestras se mezclan de manera aleatoria antes de cada época, gracias a la opción shuffle=True.
Una vez completado el entrenamiento, se realizan predicciones sobre el Precio del Bitcoin utilizando los mejores modelos guardados. Las predicciones generadas, que corresponden al Precio del Bitcoin escalado, son transformadas inversamente para recuperar los valores originales. Asimismo, se evalúa la calidad del ajuste mediante el uso de métricas de medición como SSE, MAPE, MAD, MSD y R2.
import os
import re
from tensorflow.keras.models import load_model
import numpy as np
# Scan the checkpoint directory for τ=14 LSTM weight files and keep the one
# whose filename encodes the lowest validation loss.
model_dir = 'keras_models'
files = os.listdir(model_dir)
pattern = r"PRSA_data_at_LSTM14_weights\.(\d+)-([\d\.]+)\.keras"
best_val_loss = float('inf')
best_model_file = None
best14_lstm_at = None
for file in files:
    m = re.match(pattern, file)
    if m is None:
        continue  # filename does not belong to this model family
    # Group 1 is the epoch, group 2 the val_loss embedded in the filename;
    # only the loss matters for the selection.
    candidate_loss = float(m.group(2))
    if candidate_loss < best_val_loss:
        best_val_loss, best_model_file = candidate_loss, file
if best_model_file:
    best_model_path = os.path.join(model_dir, best_model_file)
    print(f"Cargando el mejor modelo: {best_model_file} con val_loss: {best_val_loss}")
    best14_lstm_at = load_model(best_model_path)  # deserialize the winning checkpoint
    if best14_lstm_at is None:
        print("Error: No se pudo cargar el mejor modelo.")
else:
    print("No se encontraron archivos de modelos que coincidan con el patrón.")
Cargando el mejor modelo: PRSA_data_at_LSTM14_weights.15-0.2154.keras con val_loss: 0.2154
A continuación estimamos las predicciones del modelo LSTM para la época en donde la función de pérdida alcanza su valor más bajo.
print("Archivos en el directorio 'keras_models':", files)
Archivos en el directorio 'keras_models': ['PRSA_data_vola14_MLP7_weights.04-0.0018.keras', 'PRSA_data_price_MLP21_weights.01-3579.4128.keras', 'PRSA_data_price_LSTM21_weights.01-43062.3047.keras', 'PRSA_data_price_LSTM28_weights.02-42892.2305.keras', 'PRSA_data_price_LSTM_weights.19-67016.3047.keras', 'PRSA_data_vola14_MLP21_weights.20-0.0011.keras', 'PRSA_data_price_LSTM14_weights.06-51394.8750.keras', 'PRSA_data_vola21_LSTM7_weights.04-0.0012.keras', 'PRSA_data_vola7_MLP28_weights.07-0.0033.keras', 'PRSA_data_price_LSTM21_weights.02-43052.0469.keras', 'PRSA_data_at_MLP7_weights.80-0.0780.keras', 'PRSA_data_vola21_LSTM28_weights.02-0.0022.keras', 'PRSA_data_vola21_LSTM14_weights.04-0.0011.keras', 'PRSA_data_vola7_LSTM21_weights.18-0.0029.keras', 'PRSA_data_vola21_LSTM7_weights.08-0.0013.keras', 'PRSA_data_vola28_MLP14_weights.04-0.0013.keras', 'PRSA_data_price_MLP14_weights.52-626.2623.keras', 'PRSA_data_vola7_MLP7_weights.15-0.0041.keras', 'PRSA_data_price_LSTM14_weights.13-51325.8516.keras', 'PRSA_data_vola14_MLP7_weights.02-0.0023.keras', 'PRSA_data_price_LSTM21_weights.11-42962.6562.keras', 'PRSA_data_vola7_LSTM28_weights.10-0.0037.keras', 'PRSA_data_at_LSTM_weights.04-1.2071.keras', 'PRSA_data_price_MLP7_weights.50-1342.5273.keras', 'PRSA_data_price_MLP14_weights.03-5108.7837.keras', 'PRSA_data_vola28_MLP7_weights.14-0.0012.keras', 'PRSA_data_at_MLP7_weights.11-2.4883.keras', 'PRSA_data_price_LSTM28_weights.08-42832.5430.keras', 'PRSA_data_price_MLP28_weights.01-9242.6553.keras', 'PRSA_data_vola21_LSTM28_weights.04-0.0019.keras', 'PRSA_data_price_MLP21_weights.16-1013.2251.keras', 'PRSA_data_price_MLP28_weights.13-1042.5178.keras', 'PRSA_data_vola28_MLP28_weights.34-0.0006.keras', 'PRSA_data_at_MLP7_weights.01-19.6635.keras', 'PRSA_data_at_MLP7_weights.19-1.8379.keras', 'PRSA_data_at_MLP7_weights.12-2.3991.keras', 'PRSA_data_price_MLP7_weights.15-1346.3275.keras', 'PRSA_data_vola14_MLP28_weights.02-0.0024.keras', 
'PRSA_data_vola14_MLP28_weights.07-0.0017.keras', 'PRSA_data_at_MLP28_weights.05-0.0930.keras', 'PRSA_data_price_LSTM14_weights.20-51256.7266.keras', 'PRSA_data_vola21_LSTM21_weights.05-0.0013.keras', 'PRSA_data_price_MLP14_weights.12-1304.7009.keras', 'PRSA_data_vola7_LSTM14_weights.09-0.0044.keras', 'PRSA_data_vola28_MLP28_weights.50-0.0006.keras', 'PRSA_data_vola7_MLP14_weights.15-0.0020.keras', 'PRSA_data_price_MLP14_weights.08-1386.5365.keras', 'PRSA_data_price_LSTM21_weights.09-42982.2383.keras', 'PRSA_data_price_MLP28_weights.11-7366.8994.keras', 'PRSA_data_vola14_MLP7_weights.18-0.0015.keras', 'PRSA_data_price_MLP7_weights.14-1734.7238.keras', 'PRSA_data_price_LSTM14_weights.15-51306.2070.keras', 'PRSA_data_vola21_MLP28_weights.25-0.0010.keras', 'PRSA_data_at_LSTM14_weights.02-2.2175.keras', 'PRSA_data_vola28_LSTM7_weights.08-0.0014.keras', 'PRSA_data_price_MLP28_weights.10-1107.4047.keras', 'PRSA_data_vola28_MLP7_weights.01-0.0068.keras', 'PRSA_data_vola21_LSTM21_weights.03-0.0015.keras', 'PRSA_data_price_MLP7_weights.01-9253.3047.keras', 'PRSA_data_vola21_MLP14_weights.31-0.0007.keras', 'PRSA_data_price_LSTM28_weights.12-42793.6172.keras', 'PRSA_data_price_LSTM14_weights.09-51365.0000.keras', 'PRSA_data_vola28_MLP28_weights.42-0.0006.keras', 'PRSA_data_price_MLP7_weights.05-4792.5054.keras', 'PRSA_data_vola21_LSTM_weights.01-0.0024.keras', 'PRSA_data_price_LSTM28_weights.17-42745.0352.keras', 'PRSA_data_vola28_MLP28_weights.45-0.0006.keras', 'PRSA_data_vola28_MLP14_weights.08-0.0010.keras', 'PRSA_data_at_LSTM_weights.11-0.6498.keras', 'PRSA_data_vola28_MLP28_weights.39-0.0007.keras', 'PRSA_data_at_MLP21_weights.11-0.0326.keras', 'PRSA_data_price_LSTM_weights.20-67006.4141.keras', 'PRSA_data_vola28_MLP7_weights.06-0.0014.keras', 'PRSA_data_at_LSTM14_weights.06-0.8145.keras', 'PRSA_data_at_MLP28_weights.01-2.1902.keras', 'PRSA_data_price_MLP14_weights.46-814.1992.keras', '.DS_Store', 'PRSA_data_vola28_LSTM28_weights.12-0.0006.keras', 
'PRSA_data_price_LSTM14_weights.18-51276.5625.keras', 'PRSA_data_vola21_LSTM14_weights.01-0.0078.keras', 'PRSA_data_vola14_MLP21_weights.04-0.0015.keras', 'PRSA_data_vola21_LSTM7_weights.02-0.0012.keras', 'PRSA_data_vola21_LSTM14_weights.02-0.0011.keras', 'PRSA_data_at_MLP7_weights.09-2.9793.keras', 'PRSA_data_vola21_MLP14_weights.06-0.0007.keras', 'PRSA_data_vola14_MLP21_weights.78-0.0010.keras', 'PRSA_data_price_MLP14_weights.64-711.9107.keras', 'PRSA_data_vola14_MLP28_weights.31-0.0011.keras', 'PRSA_data_vola14_LSTM_weights.08-0.0020.keras', 'PRSA_data_vola14_LSTM21_weights.02-0.0018.keras', 'PRSA_data_vola14_LSTM28_weights.17-0.0014.keras', 'PRSA_data_vola14_MLP14_weights.29-0.0005.keras', 'PRSA_data_price_LSTM_weights.03-67175.7734.keras', 'PRSA_data_vola14_LSTM_weights.01-0.0068.keras', 'PRSA_data_price_LSTM28_weights.11-42803.3086.keras', 'PRSA_data_price_MLP28_weights.15-7107.3306.keras', 'PRSA_data_vola14_MLP21_weights.49-0.0010.keras', 'PRSA_data_vola28_MLP7_weights.07-0.0010.keras', 'PRSA_data_vola21_LSTM7_weights.02-0.0010.keras', 'PRSA_data_at_LSTM14_weights.15-0.4082.keras', 'PRSA_data_vola28_MLP7_weights.19-0.0011.keras', 'PRSA_data_vola14_MLP14_weights.06-0.0006.keras', 'PRSA_data_vola14_LSTM21_weights.06-0.0012.keras', 'PRSA_data_price_LSTM_weights.17-67036.2891.keras', 'PRSA_data_vola28_MLP7_weights.03-0.0018.keras', 'PRSA_data_vola7_MLP28_weights.02-0.0039.keras', 'PRSA_data_vola7_LSTM28_weights.16-0.0035.keras', 'PRSA_data_vola21_MLP7_weights.20-0.0011.keras', 'PRSA_data_vola21_LSTM7_weights.01-0.0018.keras', 'PRSA_data_vola21_MLP21_weights.33-0.0009.keras', 'PRSA_data_price_MLP21_weights.64-870.7020.keras', 'PRSA_data_vola21_MLP14_weights.22-0.0007.keras', 'PRSA_data_vola7_MLP14_weights.14-0.0020.keras', 'PRSA_data_price_LSTM21_weights.17-42903.9648.keras', 'PRSA_data_vola21_MLP14_weights.01-0.0021.keras', 'PRSA_data_vola21_LSTM7_weights.16-0.0011.keras', 'PRSA_data_at_LSTM14_weights.11-0.6656.keras', 
'PRSA_data_vola28_MLP7_weights.02-0.0019.keras', 'PRSA_data_vola14_MLP28_weights.05-0.0018.keras', 'PRSA_data_vola14_LSTM14_weights.08-0.0005.keras', 'PRSA_data_at_MLP21_weights.02-0.8171.keras', 'PRSA_data_price_MLP28_weights.02-8134.2202.keras', 'PRSA_data_price_LSTM28_weights.06-42852.3320.keras', 'PRSA_data_vola7_MLP14_weights.02-0.0036.keras', 'PRSA_data_vola14_LSTM28_weights.01-0.0026.keras', 'PRSA_data_at_MLP7_weights.04-9.8837.keras', 'PRSA_data_price_MLP28_weights.16-1274.4368.keras', 'PRSA_data_price_LSTM21_weights.07-43002.0078.keras', 'PRSA_data_vola7_MLP28_weights.23-0.0032.keras', 'PRSA_data_price_LSTM_weights.01-67196.3359.keras', 'PRSA_data_at_MLP28_weights.02-1.9156.keras', 'PRSA_data_vola14_MLP21_weights.02-0.0016.keras', 'PRSA_data_vola28_MLP14_weights.14-0.0007.keras', 'PRSA_data_price_LSTM28_weights.07-42842.4375.keras', 'PRSA_data_price_MLP14_weights.37-825.1805.keras', 'PRSA_data_vola7_MLP14_weights.07-0.0021.keras', 'PRSA_data_price_LSTM14_weights.04-51414.8945.keras', 'PRSA_data_price_MLP7_weights.42-1446.1339.keras', 'PRSA_data_price_MLP21_weights.57-932.6497.keras', 'PRSA_data_price_LSTM14_weights.19-51266.6133.keras', 'PRSA_data_vola21_MLP14_weights.34-0.0008.keras', 'PRSA_data_price_MLP14_weights.01-10485.8262.keras', 'PRSA_data_vola21_LSTM7_weights.08-0.0012.keras', 'PRSA_data_at_LSTM_weights.02-2.4966.keras', 'PRSA_data_vola21_LSTM28_weights.15-0.0009.keras', 'PRSA_data_price_LSTM_weights.20-67006.3047.keras', 'PRSA_data_price_LSTM_weights.14-67066.1016.keras', 'PRSA_data_price_LSTM21_weights.19-42884.2422.keras', 'PRSA_data_vola28_LSTM21_weights.18-0.0008.keras', 'PRSA_data_vola21_LSTM7_weights.04-0.0013.keras', 'PRSA_data_price_LSTM28_weights.20-42714.5508.keras', 'PRSA_data_at_LSTM14_weights.13-0.5609.keras', 'PRSA_data_vola28_LSTM28_weights.04-0.0009.keras', 'PRSA_data_vola14_MLP21_weights.01-0.0118.keras', 'PRSA_data_vola7_MLP21_weights.02-0.0032.keras', 'PRSA_data_at_MLP7_weights.74-0.2838.keras', 
'PRSA_data_price_MLP28_weights.17-1217.0461.keras', 'PRSA_data_at_MLP21_weights.16-1.1973.keras', 'PRSA_data_vola7_MLP7_weights.02-0.0036.keras', 'PRSA_data_vola21_LSTM7_weights.02-0.0015.keras', 'PRSA_data_vola14_LSTM14_weights.04-0.0006.keras', 'PRSA_data_vola28_LSTM28_weights.18-0.0006.keras', 'PRSA_data_price_MLP21_weights.74-805.9489.keras', 'PRSA_data_vola21_MLP14_weights.02-0.0011.keras', 'PRSA_data_vola28_MLP21_weights.02-0.0012.keras', 'PRSA_data_vola21_LSTM_weights.11-0.0014.keras', 'PRSA_data_vola7_LSTM14_weights.14-0.0036.keras', 'PRSA_data_at_MLP28_weights.03-1.7712.keras', 'PRSA_data_price_LSTM21_weights.20-42874.3555.keras', 'PRSA_data_vola28_LSTM7_weights.20-0.0010.keras', 'PRSA_data_vola28_MLP21_weights.04-0.0009.keras', 'PRSA_data_at_MLP7_weights.08-3.6716.keras', 'PRSA_data_at_MLP21_weights.04-0.0734.keras', 'PRSA_data_vola21_LSTM28_weights.03-0.0021.keras', 'PRSA_data_vola21_MLP14_weights.35-0.0008.keras', 'PRSA_data_vola28_LSTM7_weights.11-0.0012.keras', 'PRSA_data_price_LSTM28_weights.04-42872.1953.keras', 'PRSA_data_at_LSTM_weights.08-0.4803.keras', 'PRSA_data_vola28_MLP14_weights.05-0.0010.keras', 'PRSA_data_price_LSTM14_weights.14-51316.0625.keras', 'PRSA_data_vola7_MLP14_weights.01-0.0101.keras', 'PRSA_data_vola14_LSTM28_weights.17-0.0015.keras', 'PRSA_data_vola7_LSTM_weights.01-0.0040.keras', 'PRSA_data_vola14_MLP21_weights.09-0.0011.keras', 'PRSA_data_price_LSTM21_weights.05-43021.9570.keras', 'PRSA_data_vola28_MLP28_weights.03-0.0007.keras', 'PRSA_data_vola7_LSTM14_weights.10-0.0042.keras', 'PRSA_data_price_LSTM_weights.07-67135.3359.keras', 'PRSA_data_vola7_MLP14_weights.18-0.0020.keras', 'PRSA_data_price_LSTM21_weights.18-42894.1055.keras', 'PRSA_data_vola7_MLP7_weights.01-0.0042.keras', 'PRSA_data_vola14_LSTM_weights.05-0.0024.keras', 'PRSA_data_vola21_MLP14_weights.27-0.0008.keras', 'PRSA_data_vola28_MLP28_weights.02-0.0018.keras', 'PRSA_data_vola21_LSTM7_weights.03-0.0017.keras', 'PRSA_data_price_LSTM_weights.10-67105.4297.keras', 
'PRSA_data_vola28_MLP7_weights.19-0.0010.keras', 'PRSA_data_vola7_MLP21_weights.03-0.0030.keras', 'PRSA_data_at_MLP7_weights.06-5.3724.keras', 'PRSA_data_at_LSTM_weights.05-1.1226.keras', 'PRSA_data_vola21_LSTM7_weights.01-0.0026.keras', 'PRSA_data_vola21_LSTM28_weights.05-0.0018.keras', 'PRSA_data_vola14_MLP21_weights.05-0.0012.keras', 'PRSA_data_vola21_LSTM28_weights.14-0.0010.keras', 'PRSA_data_vola21_MLP14_weights.08-0.0010.keras', 'PRSA_data_vola21_MLP28_weights.47-0.0008.keras', 'PRSA_data_price_LSTM14_weights.16-51296.3633.keras', 'PRSA_data_at_MLP7_weights.02-16.3421.keras', 'PRSA_data_vola7_LSTM21_weights.15-0.0030.keras', 'PRSA_data_vola21_MLP21_weights.09-0.0009.keras', 'PRSA_data_at_LSTM14_weights.01-6.2168.keras', 'PRSA_data_vola28_LSTM21_weights.01-0.0016.keras', 'PRSA_data_vola21_LSTM21_weights.16-0.0009.keras', 'PRSA_data_vola21_LSTM28_weights.07-0.0016.keras', 'PRSA_data_vola21_MLP28_weights.19-0.0010.keras', 'PRSA_data_vola21_LSTM28_weights.14-0.0009.keras', 'PRSA_data_vola14_MLP7_weights.09-0.0018.keras', 'PRSA_data_vola21_MLP21_weights.02-0.0011.keras', 'PRSA_data_vola28_LSTM14_weights.03-0.0007.keras', 'PRSA_data_vola21_LSTM_weights.09-0.0015.keras', 'PRSA_data_vola21_LSTM7_weights.09-0.0012.keras', 'PRSA_data_vola28_MLP21_weights.65-0.0007.keras', 'PRSA_data_vola21_MLP28_weights.15-0.0011.keras', 'PRSA_data_at_MLP7_weights.42-0.8694.keras', 'PRSA_data_vola21_LSTM28_weights.18-0.0008.keras', 'PRSA_data_vola21_MLP14_weights.04-0.0008.keras', 'PRSA_data_vola7_LSTM14_weights.02-0.0040.keras', 'PRSA_data_vola21_LSTM14_weights.12-0.0007.keras', 'PRSA_data_vola7_LSTM21_weights.15-0.0029.keras', 'PRSA_data_vola21_MLP28_weights.01-0.0026.keras', 'PRSA_data_price_LSTM_weights.13-67075.9453.keras', 'PRSA_data_vola14_MLP7_weights.01-0.0041.keras', 'PRSA_data_price_MLP21_weights.02-1476.1034.keras', 'PRSA_data_vola14_LSTM28_weights.19-0.0014.keras', 'PRSA_data_price_MLP14_weights.96-663.0834.keras', 'PRSA_data_vola7_MLP21_weights.94-0.0024.keras', 
'PRSA_data_price_LSTM21_weights.03-43041.9570.keras', 'PRSA_data_price_MLP7_weights.12-3798.9531.keras', 'PRSA_data_price_LSTM28_weights.10-42813.0352.keras', 'PRSA_data_vola14_MLP28_weights.20-0.0014.keras', 'PRSA_data_vola7_MLP21_weights.04-0.0027.keras', 'PRSA_data_vola7_LSTM28_weights.18-0.0035.keras', 'PRSA_data_at_MLP7_weights.10-2.7632.keras', 'PRSA_data_price_LSTM_weights.06-67145.4141.keras', 'PRSA_data_price_LSTM28_weights.13-42783.9805.keras', 'PRSA_data_vola21_MLP7_weights.13-0.0012.keras', 'PRSA_data_price_LSTM28_weights.01-42902.4570.keras', 'PRSA_data_price_LSTM_weights.20-67005.9609.keras', 'PRSA_data_vola7_LSTM21_weights.04-0.0034.keras', 'PRSA_data_at_MLP14_weights.03-0.0461.keras', 'PRSA_data_vola28_MLP28_weights.01-0.0042.keras', 'PRSA_data_vola7_LSTM14_weights.01-0.0455.keras', 'PRSA_data_price_MLP28_weights.17-1182.9105.keras', 'PRSA_data_vola7_MLP14_weights.05-0.0018.keras', 'PRSA_data_vola28_MLP14_weights.38-0.0006.keras', 'PRSA_data_vola28_MLP7_weights.10-0.0013.keras', 'PRSA_data_vola7_LSTM21_weights.01-0.0038.keras', 'PRSA_data_price_LSTM14_weights.17-51286.4570.keras', 'PRSA_data_price_LSTM28_weights.16-42754.8008.keras', 'PRSA_data_price_LSTM14_weights.11-51345.4023.keras', 'PRSA_data_price_MLP14_weights.22-638.4409.keras', 'PRSA_data_vola7_MLP28_weights.32-0.0029.keras', 'PRSA_data_price_MLP7_weights.48-1648.1864.keras', 'PRSA_data_vola28_LSTM28_weights.10-0.0007.keras', 'PRSA_data_vola21_LSTM_weights.03-0.0021.keras', 'PRSA_data_vola21_LSTM21_weights.13-0.0010.keras', 'PRSA_data_at_MLP7_weights.07-4.2627.keras', 'PRSA_data_vola7_LSTM21_weights.17-0.0029.keras', 'PRSA_data_vola21_LSTM28_weights.08-0.0011.keras', 'PRSA_data_price_MLP21_weights.05-1072.5081.keras', 'PRSA_data_vola7_LSTM21_weights.05-0.0030.keras', 'PRSA_data_price_MLP14_weights.29-856.8320.keras', 'PRSA_data_vola14_LSTM21_weights.11-0.0011.keras', 'PRSA_data_price_LSTM21_weights.10-42972.4219.keras', 'PRSA_data_price_LSTM_weights.02-67185.9453.keras', 
'PRSA_data_vola28_MLP14_weights.21-0.0007.keras', 'PRSA_data_vola21_LSTM28_weights.16-0.0009.keras', 'PRSA_data_vola28_LSTM21_weights.10-0.0009.keras', 'PRSA_data_price_LSTM28_weights.15-42764.5898.keras', 'PRSA_data_at_LSTM14_weights.07-0.7059.keras', 'PRSA_data_vola21_LSTM21_weights.01-0.0048.keras', 'PRSA_data_at_LSTM14_weights.03-1.3422.keras', 'PRSA_data_vola14_LSTM14_weights.02-0.0015.keras', 'PRSA_data_vola14_MLP14_weights.02-0.0007.keras', 'PRSA_data_vola7_LSTM21_weights.02-0.0034.keras', 'PRSA_data_vola7_MLP21_weights.21-0.0025.keras', 'PRSA_data_at_MLP7_weights.18-1.8396.keras', 'PRSA_data_price_LSTM_weights.11-67095.6172.keras', 'PRSA_data_at_LSTM14_weights.12-0.6378.keras', 'PRSA_data_vola21_LSTM21_weights.07-0.0012.keras', 'PRSA_data_vola7_MLP28_weights.45-0.0030.keras', 'PRSA_data_at_MLP7_weights.26-0.8897.keras', 'PRSA_data_vola21_MLP7_weights.01-0.0014.keras', 'PRSA_data_price_MLP14_weights.19-982.5739.keras', 'PRSA_data_vola14_LSTM_weights.16-0.0017.keras', 'PRSA_data_price_MLP21_weights.94-813.8954.keras', 'PRSA_data_vola7_LSTM21_weights.20-0.0029.keras', 'PRSA_data_at_LSTM_weights.03-1.4842.keras', 'PRSA_data_vola21_MLP28_weights.02-0.0015.keras', 'PRSA_data_price_LSTM21_weights.12-42952.9453.keras', 'PRSA_data_price_MLP28_weights.17-1019.2816.keras', 'PRSA_data_vola14_LSTM_weights.13-0.0019.keras', 'PRSA_data_vola7_LSTM_weights.02-0.0038.keras', 'PRSA_data_vola7_LSTM28_weights.02-0.0042.keras', 'PRSA_data_at_LSTM14_weights.13-0.2270.keras', 'PRSA_data_price_LSTM_weights.18-67026.2891.keras', 'PRSA_data_vola28_MLP21_weights.66-0.0007.keras', 'PRSA_data_at_MLP14_weights.42-0.0373.keras', 'PRSA_data_vola7_MLP14_weights.38-0.0020.keras', 'PRSA_data_vola7_LSTM21_weights.11-0.0029.keras', 'PRSA_data_vola21_LSTM21_weights.12-0.0010.keras', 'PRSA_data_at_LSTM_weights.14-0.5158.keras', 'PRSA_data_vola14_LSTM21_weights.01-0.0019.keras', 'PRSA_data_vola28_LSTM21_weights.11-0.0009.keras', 'PRSA_data_vola7_LSTM28_weights.05-0.0038.keras', 
'PRSA_data_vola14_MLP14_weights.10-0.0005.keras', 'PRSA_data_at_MLP21_weights.03-2.1540.keras', 'PRSA_data_at_LSTM_weights.16-0.5377.keras', 'PRSA_data_vola21_LSTM7_weights.13-0.0013.keras', 'PRSA_data_vola21_MLP28_weights.44-0.0008.keras', 'PRSA_data_at_MLP7_weights.16-1.9941.keras', 'PRSA_data_vola28_LSTM28_weights.01-0.0010.keras', 'PRSA_data_vola28_LSTM14_weights.19-0.0006.keras', 'PRSA_data_price_LSTM21_weights.14-42933.4180.keras', 'PRSA_data_vola14_LSTM28_weights.04-0.0019.keras', 'PRSA_data_price_MLP21_weights.82-828.7976.keras', 'PRSA_data_vola28_MLP21_weights.01-0.0030.keras', 'PRSA_data_price_LSTM14_weights.10-51355.2070.keras', 'PRSA_data_vola21_MLP21_weights.18-0.0010.keras', 'PRSA_data_vola28_MLP21_weights.73-0.0007.keras', 'PRSA_data_vola28_MLP7_weights.20-0.0011.keras', 'PRSA_data_price_LSTM21_weights.13-42943.1719.keras', 'PRSA_data_vola28_MLP28_weights.36-0.0007.keras', 'PRSA_data_vola7_MLP14_weights.04-0.0025.keras', 'PRSA_data_at_MLP14_weights.02-0.2715.keras', 'PRSA_data_price_LSTM14_weights.05-51404.8945.keras', 'PRSA_data_price_LSTM28_weights.19-42725.4297.keras', 'PRSA_data_price_MLP28_weights.01-1561.6656.keras', 'PRSA_data_vola21_MLP7_weights.14-0.0011.keras', 'PRSA_data_vola7_MLP7_weights.23-0.0033.keras', 'PRSA_data_vola14_MLP28_weights.01-0.0026.keras', 'PRSA_data_at_MLP14_weights.29-0.0323.keras', 'PRSA_data_vola7_MLP21_weights.01-0.0033.keras', 'PRSA_data_vola21_LSTM_weights.19-0.0014.keras', 'PRSA_data_price_MLP7_weights.03-5953.8076.keras', 'PRSA_data_price_MLP21_weights.42-948.6628.keras', 'PRSA_data_vola21_LSTM14_weights.07-0.0008.keras', 'PRSA_data_vola14_LSTM28_weights.05-0.0018.keras', 'PRSA_data_vola7_LSTM28_weights.13-0.0037.keras', 'PRSA_data_vola21_MLP21_weights.01-0.0016.keras', 'PRSA_data_vola14_LSTM21_weights.08-0.0011.keras', 'PRSA_data_price_LSTM21_weights.06-43011.9570.keras', 'PRSA_data_vola7_MLP14_weights.05-0.0019.keras', 'PRSA_data_vola7_MLP28_weights.16-0.0033.keras', 
'PRSA_data_vola21_LSTM28_weights.16-0.0008.keras', 'PRSA_data_vola28_MLP14_weights.01-0.0031.keras', 'PRSA_data_vola14_MLP28_weights.11-0.0015.keras', 'PRSA_data_price_LSTM14_weights.03-51425.0000.keras', 'PRSA_data_vola7_LSTM28_weights.04-0.0039.keras', 'PRSA_data_vola28_LSTM7_weights.13-0.0011.keras', 'PRSA_data_price_LSTM14_weights.01-51445.4883.keras', 'PRSA_data_vola28_MLP28_weights.22-0.0006.keras', 'PRSA_data_price_LSTM_weights.09-67115.3359.keras', 'PRSA_data_vola28_LSTM7_weights.14-0.0011.keras', 'PRSA_data_vola7_LSTM14_weights.04-0.0050.keras', 'PRSA_data_vola21_MLP28_weights.17-0.0010.keras', 'PRSA_data_vola28_LSTM21_weights.09-0.0010.keras', 'PRSA_data_vola21_MLP14_weights.13-0.0010.keras', 'PRSA_data_vola14_MLP14_weights.01-0.0013.keras', 'PRSA_data_vola21_MLP28_weights.50-0.0009.keras', 'PRSA_data_at_LSTM_weights.19-0.2184.keras', 'PRSA_data_vola14_LSTM_weights.02-0.0030.keras', 'PRSA_data_vola7_MLP28_weights.39-0.0030.keras', 'PRSA_data_price_LSTM21_weights.04-43031.9414.keras', 'PRSA_data_price_LSTM_weights.15-67056.2109.keras', 'PRSA_data_at_MLP7_weights.20-1.6814.keras', 'PRSA_data_vola28_LSTM7_weights.18-0.0010.keras', 'PRSA_data_at_MLP7_weights.95-0.2130.keras', 'PRSA_data_vola14_LSTM28_weights.03-0.0021.keras', 'PRSA_data_at_LSTM_weights.01-7.5653.keras', 'PRSA_data_at_MLP7_weights.05-7.2110.keras', 'PRSA_data_vola21_LSTM28_weights.20-0.0008.keras', 'PRSA_data_at_LSTM_weights.07-0.8569.keras', 'PRSA_data_vola14_LSTM28_weights.12-0.0014.keras', 'PRSA_data_price_LSTM21_weights.16-42913.7969.keras', 'PRSA_data_at_MLP28_weights.04-1.3772.keras', 'PRSA_data_price_LSTM_weights.08-67125.3047.keras', 'PRSA_data_price_LSTM14_weights.07-51384.8633.keras', 'PRSA_data_price_LSTM_weights.04-67165.6016.keras', 'PRSA_data_vola14_LSTM28_weights.15-0.0014.keras', 'PRSA_data_at_MLP14_weights.01-0.4195.keras', 'PRSA_data_price_LSTM28_weights.09-42822.7578.keras', 'PRSA_data_price_LSTM28_weights.03-42882.1953.keras', 'PRSA_data_at_LSTM14_weights.05-0.8408.keras', 
'PRSA_data_vola7_MLP28_weights.08-0.0030.keras', 'PRSA_data_price_MLP14_weights.07-3004.8474.keras', 'PRSA_data_price_LSTM21_weights.15-42923.6367.keras', 'PRSA_data_vola7_MLP21_weights.81-0.0025.keras', 'PRSA_data_price_MLP21_weights.86-819.7106.keras', 'PRSA_data_vola28_MLP7_weights.10-0.0010.keras', 'PRSA_data_vola21_MLP28_weights.05-0.0012.keras', 'PRSA_data_vola21_MLP14_weights.20-0.0007.keras', 'PRSA_data_vola14_MLP28_weights.42-0.0011.keras', 'PRSA_data_price_LSTM_weights.12-67085.7578.keras', 'PRSA_data_vola28_MLP21_weights.13-0.0008.keras', 'PRSA_data_vola14_MLP28_weights.08-0.0016.keras', 'PRSA_data_at_MLP7_weights.86-0.0883.keras', 'PRSA_data_vola28_LSTM14_weights.01-0.0020.keras', 'PRSA_data_vola21_MLP21_weights.71-0.0010.keras', 'PRSA_data_at_MLP28_weights.09-0.1537.keras', 'PRSA_data_vola7_MLP28_weights.48-0.0030.keras', 'PRSA_data_at_MLP28_weights.12-0.0354.keras', 'PRSA_data_at_LSTM14_weights.04-0.9732.keras', 'PRSA_data_at_LSTM14_weights.15-0.2154.keras', 'PRSA_data_vola14_MLP28_weights.21-0.0013.keras', 'PRSA_data_vola21_MLP14_weights.25-0.0009.keras', 'PRSA_data_vola14_LSTM14_weights.01-0.0027.keras', 'PRSA_data_vola21_LSTM28_weights.01-0.0039.keras', 'PRSA_data_price_MLP14_weights.02-8059.2510.keras', 'PRSA_data_vola21_MLP7_weights.13-0.0013.keras', 'PRSA_data_vola28_MLP7_weights.04-0.0016.keras', 'PRSA_data_vola21_LSTM21_weights.02-0.0043.keras', 'PRSA_data_price_MLP14_weights.78-683.9230.keras', 'PRSA_data_price_MLP21_weights.19-1002.5925.keras', 'PRSA_data_vola28_MLP21_weights.92-0.0007.keras', 'PRSA_data_price_LSTM28_weights.14-42774.2500.keras', 'PRSA_data_vola21_MLP21_weights.06-0.0010.keras', 'PRSA_data_vola14_LSTM14_weights.03-0.0010.keras', 'PRSA_data_price_LSTM_weights.16-67046.2422.keras', 'PRSA_data_vola7_LSTM28_weights.07-0.0037.keras', 'PRSA_data_vola28_LSTM7_weights.01-0.0017.keras', 'PRSA_data_vola28_LSTM14_weights.02-0.0008.keras', 'PRSA_data_vola7_MLP14_weights.05-0.0022.keras', 'PRSA_data_at_MLP21_weights.03-0.2039.keras', 
'PRSA_data_vola14_LSTM28_weights.02-0.0023.keras', 'PRSA_data_at_MLP7_weights.03-13.2212.keras', 'PRSA_data_vola7_LSTM14_weights.04-0.0037.keras', 'PRSA_data_vola14_MLP21_weights.69-0.0010.keras', 'PRSA_data_vola21_LSTM7_weights.18-0.0013.keras', 'PRSA_data_vola7_MLP21_weights.49-0.0025.keras', 'PRSA_data_vola28_MLP28_weights.31-0.0006.keras', 'PRSA_data_price_MLP21_weights.77-841.5739.keras', 'PRSA_data_vola28_MLP14_weights.35-0.0006.keras', 'PRSA_data_vola7_MLP28_weights.26-0.0031.keras', 'PRSA_data_vola7_LSTM14_weights.01-0.0065.keras', 'PRSA_data_price_LSTM14_weights.02-51435.1133.keras', 'PRSA_data_price_LSTM28_weights.20-42714.7695.keras', 'PRSA_data_vola28_LSTM28_weights.16-0.0006.keras', 'PRSA_data_vola14_LSTM28_weights.06-0.0016.keras', 'PRSA_data_vola21_MLP21_weights.13-0.0010.keras', 'PRSA_data_vola28_MLP14_weights.32-0.0006.keras', 'PRSA_data_vola7_MLP28_weights.01-0.0043.keras', 'PRSA_data_vola21_MLP28_weights.08-0.0011.keras', 'PRSA_data_vola21_LSTM28_weights.10-0.0011.keras', 'PRSA_data_vola21_LSTM7_weights.14-0.0012.keras', 'PRSA_data_vola7_LSTM28_weights.01-0.0049.keras', 'PRSA_data_price_LSTM28_weights.20-42715.6250.keras', 'PRSA_data_vola21_LSTM7_weights.13-0.0012.keras', 'PRSA_data_price_LSTM_weights.05-67155.5078.keras', 'PRSA_data_price_MLP14_weights.14-1073.6493.keras', 'PRSA_data_vola14_LSTM_weights.03-0.0029.keras', 'PRSA_data_vola14_MLP7_weights.07-0.0021.keras', 'PRSA_data_at_MLP28_weights.03-0.0637.keras', 'PRSA_data_vola21_MLP28_weights.03-0.0012.keras', 'PRSA_data_at_MLP28_weights.05-0.1823.keras', 'PRSA_data_at_MLP21_weights.01-0.9762.keras', 'PRSA_data_vola7_LSTM28_weights.15-0.0035.keras', 'PRSA_data_price_LSTM21_weights.08-42992.0703.keras', 'PRSA_data_vola21_MLP21_weights.22-0.0010.keras', 'PRSA_data_vola28_LSTM28_weights.20-0.0006.keras', 'PRSA_data_at_MLP21_weights.01-4.3241.keras', 'PRSA_data_at_LSTM_weights.06-0.8671.keras', 'PRSA_data_price_MLP7_weights.37-1654.4709.keras', 'PRSA_data_at_LSTM_weights.20-0.4626.keras', 
'PRSA_data_vola21_MLP28_weights.27-0.0009.keras', 'PRSA_data_at_LSTM_weights.19-0.4729.keras', 'PRSA_data_at_MLP7_weights.24-1.4718.keras', 'PRSA_data_at_MLP7_weights.59-0.4636.keras', 'PRSA_data_vola14_LSTM14_weights.06-0.0005.keras', 'PRSA_data_at_MLP7_weights.14-2.2322.keras', 'PRSA_data_vola7_LSTM14_weights.02-0.0196.keras', 'PRSA_data_at_LSTM14_weights.19-0.3356.keras', 'PRSA_data_price_LSTM28_weights.18-42735.2305.keras', 'PRSA_data_price_LSTM14_weights.12-51335.6523.keras', 'PRSA_data_vola21_LSTM_weights.06-0.0017.keras', 'PRSA_data_price_LSTM28_weights.05-42862.2383.keras', 'PRSA_data_price_MLP7_weights.10-1248.9592.keras', 'PRSA_data_vola21_MLP28_weights.35-0.0009.keras', 'PRSA_data_vola28_LSTM21_weights.03-0.0013.keras', 'PRSA_data_price_MLP14_weights.04-5028.7607.keras', 'PRSA_data_price_LSTM14_weights.08-51374.8945.keras', 'PRSA_data_vola28_MLP14_weights.48-0.0007.keras']
# Predictions using the best loaded model (None when no checkpoint matched).
if best14_lstm_at is not None:
    # Predict on the 3-D LSTM inputs — assumed (samples, timesteps, 1); confirm upstream.
    train_preds_at_LSTM14 = best14_lstm_at.predict(X_train_at_lstm_14)
    val_preds_at_LSTM14 = best14_lstm_at.predict(X_val_at_lstm_14)
    test_preds_at_LSTM14 = best14_lstm_at.predict(X_test_at_lstm_14)

    # Flatten the (samples, 1) model outputs to 1-D vectors.
    train_preds_at_LSTM14 = np.squeeze(train_preds_at_LSTM14)
    val_preds_at_LSTM14 = np.squeeze(val_preds_at_LSTM14)
    # BUG FIX: the original squeezed `test_preds_price_LSTM14` (a different
    # model's predictions), silently replacing this model's test predictions —
    # which is why the printed test values (~207.5) did not match train/val (~23).
    test_preds_at_LSTM14 = np.squeeze(test_preds_at_LSTM14)

    # Print the predictions.
    print("Predicciones de Entrenamiento:", train_preds_at_LSTM14)
    print("Predicciones de validación:", val_preds_at_LSTM14)
    print("Predicciones de prueba:", test_preds_at_LSTM14)
else:
    print("No se puede realizar predicciones porque el mejor modelo no se ha cargado.")
155/155 [==============================] - 0s 1ms/step
1/1 [==============================] - 0s 9ms/step
1/1 [==============================] - 0s 10ms/step
Predicciones de Entrenamiento: [-0.1271441 -0.1271441 -0.1271441 ... 23.18849 23.18949 23.190905 ]
Predicciones de validación: [23.192411 23.19396 23.195671 23.197266 23.199038 23.200684 23.202112
23.203215 23.204134 23.20481 23.205378 23.205723 23.205833 23.20576 ]
Predicciones de prueba: [207.50458 207.50458 207.50458 207.50458 207.50458 207.50458 207.50458
207.50458 207.50458 207.50458 207.50458 207.50458 207.50458 207.50458]
plot_model(data_train_plot_at14[-100:], data_val_plot_at14, data_test_plot_at14, val_preds_at_LSTM14, test_preds_at_LSTM14, "Predicciones usando Memoria a Corto y Largo Paso (LSTM)")
A continuación realizamos un análisis sobre los residuales y rendimiento de nuestro modelo con relación a nuestro conjunto de Entrenamiento(rendimiento) y test (rendimiento y residuales).
Conjunto de datos de Entrenamiento
A continuación se realiza el análisis de los residuales para el conjunto de entrenamiento.
ljung_box_pval_LSTM_train14_at, jarque_bera_pval_LSTM_train14_at = diagnostic_plots(y_train_at14, train_preds_at_LSTM14)
Ljung-Box LB Statistic: 4532.895900
Ljung-Box p-value: 0.000000
Se rechaza H0: hay autocorrelación en los residuales.
Jarque-Bera p-value: 0.000000
Se rechaza H0: los residuales no siguen una distribución normal.
# Compute fit metrics (SSE, MAPE, MAD, MSD, R2) on the training set.
metrica_at_LSTM_train14 = metricas(y_train_at14, train_preds_at_LSTM14)
# Relabel the single row from index 0 to a descriptive name.
metrica_at_LSTM_train14 = metrica_at_LSTM_train14.rename(
    index={0: 'LSTM Entrenamiento Retorno Acumulado τ = 14'})
# Append the residual-diagnostic p-values as extra columns.
metrica_at_LSTM_train14['Ljung-Box p-value'] = ljung_box_pval_LSTM_train14_at
metrica_at_LSTM_train14['Jarque-Bera p-value'] = jarque_bera_pval_LSTM_train14_at
metrica_at_LSTM_train14
| SSE | MAPE | MAD | MSD | R2 | Ljung-Box p-value | Jarque-Bera p-value | |
|---|---|---|---|---|---|---|---|
| LSTM Entrenamiento Retorno Acumulado τ = 14 | 2209.2663 | 3.94% | 0.6 | 0.45 | 98.78% | 0.0 | 0.0 |
Conjunto de datos de Prueba:
ljung_box_pvalLSTM14_at, jarque_bera_pvalLSTM14_at = evaluate_residuals(data_test_plot_at14, test_preds_at_LSTM14)
No se rechaza H0: los residuales son independientes (no correlacionados).
No se rechaza H0: los residuales siguen una distribución normal.
# Compute fit metrics (SSE, MAPE, MAD, MSD, R2) on the test set.
metrica_LSTM_test_at14 = metricas(y_test_at14, test_preds_at_LSTM14)
# Relabel the single row from index 0 to a descriptive name.
metrica_LSTM_test_at14 = metrica_LSTM_test_at14.rename(
    index={0: 'LSTM Prueba Retorno Acumulado τ = 14'})
# Append the residual-diagnostic p-values as extra columns.
metrica_LSTM_test_at14['Ljung-Box p-value'] = ljung_box_pvalLSTM14_at
metrica_LSTM_test_at14['Jarque-Bera p-value'] = jarque_bera_pvalLSTM14_at
metrica_LSTM_test_at14
| SSE | MAPE | MAD | MSD | R2 | Ljung-Box p-value | Jarque-Bera p-value | |
|---|---|---|---|---|---|---|---|
| LSTM Prueba Retorno Acumulado τ = 14 | 473291.7791 | 777.81% | 183.87 | 33806.56 | -660795871.98% | 0.0599 | 0.7427 |
Curva Runs vs Error/Score :
plot_best_model_validation_loss(history_at_LSTM14)
Boxplot Errores Conjunto de Entrenamiento, Validación y Prueba :
errores_plots(y_train_at14, train_preds_at_LSTM14, y_val_at14, val_preds_at_LSTM14, y_test_at14, test_preds_at_LSTM14)
De acuerdo a los resultados obtenidos del gráfico de Run vs Error/Score y al compararlo con el gráfico boxplot de los residuales se observa lo siguiente:
Conjunto de Entrenamiento: La media de los residuales se encuentra alejada considerablemente del error/score correspondiente al val_loss de la época del mejor modelo; además muestra un sesgo considerable a la derecha y una alta variabilidad.
Conjunto de Validación: La media de los residuales se encuentra cercana al error/score correspondiente al val_loss de la época del mejor modelo.
Conjunto de Prueba: La media de los residuales se encuentra cercana al error/score correspondiente al val_loss de la época del mejor modelo.
Lo anterior es coherente con las métricas obtenidas para el modelo implementado.
Horizonte de 21 días (\(\tau=21\))#
Indexación de parámetros para cambio de dimensión de X de 2D a 3D.
X_train_at_lstm_21, X_val_at_lstm_21, X_test_at_lstm_21 = change_dimension_lstm(X_train_at21, X_val_at21, X_test_at21)
Shape of 3D arrays X: (4915, 21, 1) (21, 21, 1) (21, 21, 1)
A continuación, se lleva a cabo la búsqueda de los hiperparámetros que optimizan nuestro modelo utilizando Keras y GridSearch.
from sklearn.metrics import make_scorer, mean_absolute_error
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # Esto ignorará todos los logs de info y warnings.
# Define the LSTM network architecture
def create_lstm_model(optimizer, activation):
    """Build and compile a two-layer LSTM regressor for 21-step univariate windows.

    Parameters
    ----------
    optimizer : str or keras optimizer
        Optimizer passed to ``compile`` (e.g. ``'SGD'``, ``'Adam'``).
    activation : str
        Activation function for the LSTM layers. FIX: the previous version
        accepted this argument but never used it, so the GridSearch over
        ``'activation'`` was a no-op; it is now applied to both LSTM layers.

    Returns
    -------
    keras.Model
        Model compiled with mean-absolute-error loss.
    """
    input_layer_lstm = Input(shape=(21, 1), dtype='float32')
    # First LSTM returns the full sequence so it can feed the second LSTM.
    lstm_layer1 = LSTM(64, activation=activation, return_sequences=True)(input_layer_lstm)
    # Second LSTM emits only the last timestep's output.
    lstm_layer2 = LSTM(32, activation=activation, return_sequences=False)(lstm_layer1)
    dropout_layer_lstm = Dropout(0.2)(lstm_layer2)
    # Single linear output neuron: one-step-ahead regression target.
    output_layer_lstm = Dense(1, activation='linear')(dropout_layer_lstm)
    ts_model_lstm = Model(inputs=input_layer_lstm, outputs=output_layer_lstm)
    ts_model_lstm.compile(loss='mean_absolute_error', optimizer=optimizer)
    return ts_model_lstm
# Hyperparameter grid (reduced for runtime; the full grids are kept in comments).
param_grid = {'activation': ['relu'],  # full grid: ['relu', 'tanh', 'sigmoid']
              'epochs': [20],          # full grid: [20, 50, 100, 150]
              'optimizer': ['SGD']     # full grid: ['SGD', 'RMSprop', 'Adam']
              }
# Grid Search configuration.
# FIX: MAE is an error metric, so the scorer must tell GridSearchCV to
# MINIMIZE it (greater_is_better=False). Without this flag, make_scorer
# defaults to maximizing, and the search would select the candidate with
# the LARGEST mean absolute error. best_score_ is now the negated MAE.
scoring = make_scorer(mean_absolute_error, greater_is_better=False)
model = KerasRegressor(build_fn=create_lstm_model, epochs=10, batch_size=16, verbose=0)
# NOTE(review): KFold(shuffle=True) mixes future observations into training
# folds, which leaks information on time-series data; TimeSeriesSplit would
# be the safer cross-validation scheme here — confirm with the assignment spec.
grid = GridSearchCV(estimator=model, param_grid=param_grid,
                    cv=KFold(n_splits=5, shuffle=True),
                    scoring=scoring, n_jobs=-1, verbose=2)
grid_result = grid.fit(X_train_at_lstm_21, y_train_at21)
# Grid Search results.
print(f"Mejor función de activación: {grid_result.best_params_['activation']}")
print(f"Mejor número de epocas: {grid_result.best_params_['epochs']}")
print(f"Mejor Longitud de paso μ (optimizer): {grid_result.best_params_['optimizer']}")
print(f"Mejor Puntuación: {grid_result.best_score_}")
Fitting 5 folds for each of 1 candidates, totalling 5 fits
[CV] END ..........activation=relu, epochs=20, optimizer=SGD; total time= 37.5s
[CV] END ..........activation=relu, epochs=20, optimizer=SGD; total time= 37.6s
[CV] END ..........activation=relu, epochs=20, optimizer=SGD; total time= 38.1s
[CV] END ..........activation=relu, epochs=20, optimizer=SGD; total time= 38.3s
[CV] END ..........activation=relu, epochs=20, optimizer=SGD; total time= 38.5s
Mejor función de activación: relu
Mejor número de epocas: 20
Mejor Longitud de paso μ (optimizer): SGD
Mejor Puntuación: 0.31983605218670463
El modelo de predicción de series temporales se basa en una red neuronal recurrente que incluye una capa de entrada que se conecta a una capa LSTM. Esta capa LSTM considera 21 pasos temporales, equivalentes al número de datos históricos (τ = 21). La salida de la LSTM se genera únicamente a partir del último paso temporal. Cada uno de los n pasos temporales definidos en el shape (n,) cuenta con 32 neuronas ocultas.
La cantidad de pasos temporales (timesteps) y 1 indica el número de características (features) por cada paso temporal. Se utiliza return_sequences=True cuando es necesario enviar la secuencia completa de salidas a la capa siguiente, que puede ser otra capa recurrente, para este caso otra capa LSTM.
La salida de LSTM pasa a una capa de exclusión que elimina aleatoriamente el 20%, 40%, 60% y 80% de la entrada antes de pasar a la capa de salida, que tiene una única neurona oculta con una función de activación lineal
La indexación de parámetros de la función build_models_lstm se realiza con base en lo indicado en el numeral 3 del parcial práctico.
# Grid of candidate architectures required by item 3 of the practical exam:
# hidden-neuron counts crossed with dropout rates, built for a 21-step input.
input_shape21 = 21
neurons_list = [10, 100, 1000, 10000]
dropout_rates = [0.2, 0.4, 0.6, 0.8]
# NOTE(review): the printed summaries below all report identical layer sizes
# (64/32 units, 29,345 params) for every neurons/dropout combination — verify
# that build_models_lstm actually applies neurons_list and dropout_rates.
models_LSTM21_at = build_models_lstm(input_shape21, neurons_list, dropout_rates, 'SGD')
Model: "model_239"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_240 (InputLayer) [(None, 21, 1)] 0
lstm_206 (LSTM) (None, 21, 64) 16896
lstm_207 (LSTM) (None, 32) 12416
dropout_239 (Dropout) (None, 32) 0
dense_647 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10 neuronas y dropout 0.2:
Model: "model_239"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_240 (InputLayer) [(None, 21, 1)] 0
lstm_206 (LSTM) (None, 21, 64) 16896
lstm_207 (LSTM) (None, 32) 12416
dropout_239 (Dropout) (None, 32) 0
dense_647 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_240"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_241 (InputLayer) [(None, 21, 1)] 0
lstm_208 (LSTM) (None, 21, 64) 16896
lstm_209 (LSTM) (None, 32) 12416
dropout_240 (Dropout) (None, 32) 0
dense_648 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10 neuronas y dropout 0.4:
Model: "model_240"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_241 (InputLayer) [(None, 21, 1)] 0
lstm_208 (LSTM) (None, 21, 64) 16896
lstm_209 (LSTM) (None, 32) 12416
dropout_240 (Dropout) (None, 32) 0
dense_648 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_241"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_242 (InputLayer) [(None, 21, 1)] 0
lstm_210 (LSTM) (None, 21, 64) 16896
lstm_211 (LSTM) (None, 32) 12416
dropout_241 (Dropout) (None, 32) 0
dense_649 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10 neuronas y dropout 0.6:
Model: "model_241"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_242 (InputLayer) [(None, 21, 1)] 0
lstm_210 (LSTM) (None, 21, 64) 16896
lstm_211 (LSTM) (None, 32) 12416
dropout_241 (Dropout) (None, 32) 0
dense_649 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_242"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_243 (InputLayer) [(None, 21, 1)] 0
lstm_212 (LSTM) (None, 21, 64) 16896
lstm_213 (LSTM) (None, 32) 12416
dropout_242 (Dropout) (None, 32) 0
dense_650 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10 neuronas y dropout 0.8:
Model: "model_242"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_243 (InputLayer) [(None, 21, 1)] 0
lstm_212 (LSTM) (None, 21, 64) 16896
lstm_213 (LSTM) (None, 32) 12416
dropout_242 (Dropout) (None, 32) 0
dense_650 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_243"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_244 (InputLayer) [(None, 21, 1)] 0
lstm_214 (LSTM) (None, 21, 64) 16896
lstm_215 (LSTM) (None, 32) 12416
dropout_243 (Dropout) (None, 32) 0
dense_651 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 100 neuronas y dropout 0.2:
Model: "model_243"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_244 (InputLayer) [(None, 21, 1)] 0
lstm_214 (LSTM) (None, 21, 64) 16896
lstm_215 (LSTM) (None, 32) 12416
dropout_243 (Dropout) (None, 32) 0
dense_651 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_244"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_245 (InputLayer) [(None, 21, 1)] 0
lstm_216 (LSTM) (None, 21, 64) 16896
lstm_217 (LSTM) (None, 32) 12416
dropout_244 (Dropout) (None, 32) 0
dense_652 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 100 neuronas y dropout 0.4:
Model: "model_244"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_245 (InputLayer) [(None, 21, 1)] 0
lstm_216 (LSTM) (None, 21, 64) 16896
lstm_217 (LSTM) (None, 32) 12416
dropout_244 (Dropout) (None, 32) 0
dense_652 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_245"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_246 (InputLayer) [(None, 21, 1)] 0
lstm_218 (LSTM) (None, 21, 64) 16896
lstm_219 (LSTM) (None, 32) 12416
dropout_245 (Dropout) (None, 32) 0
dense_653 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 100 neuronas y dropout 0.6:
Model: "model_245"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_246 (InputLayer) [(None, 21, 1)] 0
lstm_218 (LSTM) (None, 21, 64) 16896
lstm_219 (LSTM) (None, 32) 12416
dropout_245 (Dropout) (None, 32) 0
dense_653 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_246"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_247 (InputLayer) [(None, 21, 1)] 0
lstm_220 (LSTM) (None, 21, 64) 16896
lstm_221 (LSTM) (None, 32) 12416
dropout_246 (Dropout) (None, 32) 0
dense_654 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 100 neuronas y dropout 0.8:
Model: "model_246"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_247 (InputLayer) [(None, 21, 1)] 0
lstm_220 (LSTM) (None, 21, 64) 16896
lstm_221 (LSTM) (None, 32) 12416
dropout_246 (Dropout) (None, 32) 0
dense_654 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_247"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_248 (InputLayer) [(None, 21, 1)] 0
lstm_222 (LSTM) (None, 21, 64) 16896
lstm_223 (LSTM) (None, 32) 12416
dropout_247 (Dropout) (None, 32) 0
dense_655 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 1000 neuronas y dropout 0.2:
Model: "model_247"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_248 (InputLayer) [(None, 21, 1)] 0
lstm_222 (LSTM) (None, 21, 64) 16896
lstm_223 (LSTM) (None, 32) 12416
dropout_247 (Dropout) (None, 32) 0
dense_655 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_248"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_249 (InputLayer) [(None, 21, 1)] 0
lstm_224 (LSTM) (None, 21, 64) 16896
lstm_225 (LSTM) (None, 32) 12416
dropout_248 (Dropout) (None, 32) 0
dense_656 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 1000 neuronas y dropout 0.4:
Model: "model_248"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_249 (InputLayer) [(None, 21, 1)] 0
lstm_224 (LSTM) (None, 21, 64) 16896
lstm_225 (LSTM) (None, 32) 12416
dropout_248 (Dropout) (None, 32) 0
dense_656 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_249"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_250 (InputLayer) [(None, 21, 1)] 0
lstm_226 (LSTM) (None, 21, 64) 16896
lstm_227 (LSTM) (None, 32) 12416
dropout_249 (Dropout) (None, 32) 0
dense_657 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 1000 neuronas y dropout 0.6:
Model: "model_249"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_250 (InputLayer) [(None, 21, 1)] 0
lstm_226 (LSTM) (None, 21, 64) 16896
lstm_227 (LSTM) (None, 32) 12416
dropout_249 (Dropout) (None, 32) 0
dense_657 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_250"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_251 (InputLayer) [(None, 21, 1)] 0
lstm_228 (LSTM) (None, 21, 64) 16896
lstm_229 (LSTM) (None, 32) 12416
dropout_250 (Dropout) (None, 32) 0
dense_658 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 1000 neuronas y dropout 0.8:
Model: "model_250"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_251 (InputLayer) [(None, 21, 1)] 0
lstm_228 (LSTM) (None, 21, 64) 16896
lstm_229 (LSTM) (None, 32) 12416
dropout_250 (Dropout) (None, 32) 0
dense_658 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_251"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_252 (InputLayer) [(None, 21, 1)] 0
lstm_230 (LSTM) (None, 21, 64) 16896
lstm_231 (LSTM) (None, 32) 12416
dropout_251 (Dropout) (None, 32) 0
dense_659 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10000 neuronas y dropout 0.2:
Model: "model_251"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_252 (InputLayer) [(None, 21, 1)] 0
lstm_230 (LSTM) (None, 21, 64) 16896
lstm_231 (LSTM) (None, 32) 12416
dropout_251 (Dropout) (None, 32) 0
dense_659 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_252"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_253 (InputLayer) [(None, 21, 1)] 0
lstm_232 (LSTM) (None, 21, 64) 16896
lstm_233 (LSTM) (None, 32) 12416
dropout_252 (Dropout) (None, 32) 0
dense_660 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10000 neuronas y dropout 0.4:
Model: "model_252"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_253 (InputLayer) [(None, 21, 1)] 0
lstm_232 (LSTM) (None, 21, 64) 16896
lstm_233 (LSTM) (None, 32) 12416
dropout_252 (Dropout) (None, 32) 0
dense_660 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_253"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_254 (InputLayer) [(None, 21, 1)] 0
lstm_234 (LSTM) (None, 21, 64) 16896
lstm_235 (LSTM) (None, 32) 12416
dropout_253 (Dropout) (None, 32) 0
dense_661 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10000 neuronas y dropout 0.6:
Model: "model_253"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_254 (InputLayer) [(None, 21, 1)] 0
lstm_234 (LSTM) (None, 21, 64) 16896
lstm_235 (LSTM) (None, 32) 12416
dropout_253 (Dropout) (None, 32) 0
dense_661 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_254"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_255 (InputLayer) [(None, 21, 1)] 0
lstm_236 (LSTM) (None, 21, 64) 16896
lstm_237 (LSTM) (None, 32) 12416
dropout_254 (Dropout) (None, 32) 0
dense_662 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10000 neuronas y dropout 0.8:
Model: "model_254"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_255 (InputLayer) [(None, 21, 1)] 0
lstm_236 (LSTM) (None, 21, 64) 16896
lstm_237 (LSTM) (None, 32) 12416
dropout_254 (Dropout) (None, 32) 0
dense_662 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Para entrenar el modelo, se utiliza la función fit() en el objeto del modelo, proporcionándole como argumentos X_train y y_train. El proceso de entrenamiento se lleva a cabo a lo largo de un número específico de épocas. Además, el parámetro batch_size especifica cuántas muestras del conjunto de entrenamiento se usarán en cada iteración de retropropagación (backpropagation).
El conjunto de datos de validación se utiliza para evaluar el rendimiento del modelo al finalizar cada época. Se emplea un objeto ModelCheckpoint que monitorea la función de pérdida en el conjunto de validación y almacena el modelo correspondiente a la época en la que esta función alcanza su valor más bajo.
Se define val_loss como el valor de la función de coste para los datos de validación cruzada y loss como el valor de la función de coste para los datos de entrenamiento. period=1 indica que el modelo se evaluará y potencialmente se guardará después de cada época.
from tensorflow.keras.callbacks import ModelCheckpoint

# Checkpoint path template: Keras fills in the epoch number and the
# validation loss at save time.
save_weights = os.path.join('keras_models', 'PRSA_data_at_LSTM21_weights.{epoch:02d}-{val_loss:.4f}.keras')
# Keep only the best full model seen so far, judged by minimum validation
# loss, evaluated once per epoch.
save_best21_lstm_at = ModelCheckpoint(
    save_weights,
    monitor='val_loss',
    mode='min',
    verbose=2,
    save_best_only=True,
    save_weights_only=False,
    save_freq='epoch',
)
A continuación se itera sobre cada uno de los modelos del objeto models_LSTM21_at.
import os
from joblib import dump, load

history_at_LSTM21 = []
# Train (or reload) each candidate model; each training history is cached to
# disk so re-running the notebook skips already-trained models.
# FIX: iterate models_LSTM21_at — the models are built under that name above;
# `models_LSTM21` is not defined in this section.
for i, model in enumerate(models_LSTM21_at):
    filename = f'history_at_LSTM21_model_{i}.joblib'
    if os.path.exists(filename):
        # Cached history found: load it and skip training.
        # FIX: restore the {filename} placeholder lost in both messages below
        # (the captured output shows the file name was interpolated).
        model_history = load(filename)
        print(f"El archivo '{filename}' ya existe. Se ha cargado el historial del entrenamiento.")
    else:
        model_history = model.fit(x=X_train_at_lstm_21, y=y_train_at21, batch_size=16, epochs=20,
                                  verbose=2, callbacks=[save_best21_lstm_at],
                                  validation_data=(X_val_at_lstm_21, y_val_at21),
                                  shuffle=True)
        dump(model_history.history, filename)
        print(f"El entrenamiento del modelo {i + 1} se ha completado y el historial ha sido guardado en '{filename}'.")
    # Normalize to a plain dict: load() returns the history dict directly,
    # while fit() returns a History object carrying .history.
    history_at_LSTM21.append(model_history if isinstance(model_history, dict) else model_history.history)
Epoch 1/20
Epoch 1: val_loss improved from inf to 5.94519, saving model to keras_models/PRSA_data_at_LSTM21_weights.01-5.9452.keras
308/308 - 3s - loss: 6.0419 - val_loss: 5.9452 - 3s/epoch - 9ms/step
Epoch 2/20
Epoch 2: val_loss improved from 5.94519 to 2.00830, saving model to keras_models/PRSA_data_at_LSTM21_weights.02-2.0083.keras
308/308 - 2s - loss: 1.7869 - val_loss: 2.0083 - 2s/epoch - 5ms/step
Epoch 3/20
Epoch 3: val_loss improved from 2.00830 to 1.03234, saving model to keras_models/PRSA_data_at_LSTM21_weights.03-1.0323.keras
308/308 - 2s - loss: 1.2657 - val_loss: 1.0323 - 2s/epoch - 5ms/step
Epoch 4/20
Epoch 4: val_loss improved from 1.03234 to 0.80834, saving model to keras_models/PRSA_data_at_LSTM21_weights.04-0.8083.keras
308/308 - 2s - loss: 1.2769 - val_loss: 0.8083 - 2s/epoch - 5ms/step
Epoch 5/20
Epoch 5: val_loss improved from 0.80834 to 0.75127, saving model to keras_models/PRSA_data_at_LSTM21_weights.05-0.7513.keras
308/308 - 2s - loss: 1.2180 - val_loss: 0.7513 - 2s/epoch - 5ms/step
Epoch 6/20
Epoch 6: val_loss improved from 0.75127 to 0.62188, saving model to keras_models/PRSA_data_at_LSTM21_weights.06-0.6219.keras
308/308 - 2s - loss: 1.2367 - val_loss: 0.6219 - 2s/epoch - 5ms/step
Epoch 7/20
Epoch 7: val_loss improved from 0.62188 to 0.51131, saving model to keras_models/PRSA_data_at_LSTM21_weights.07-0.5113.keras
308/308 - 2s - loss: 1.1960 - val_loss: 0.5113 - 2s/epoch - 5ms/step
Epoch 8/20
Epoch 8: val_loss did not improve from 0.51131
308/308 - 2s - loss: 1.1896 - val_loss: 0.6740 - 2s/epoch - 5ms/step
Epoch 9/20
Epoch 9: val_loss did not improve from 0.51131
308/308 - 2s - loss: 1.1891 - val_loss: 0.8958 - 2s/epoch - 5ms/step
Epoch 10/20
Epoch 10: val_loss did not improve from 0.51131
308/308 - 2s - loss: 1.1754 - val_loss: 1.1988 - 2s/epoch - 5ms/step
Epoch 11/20
Epoch 11: val_loss did not improve from 0.51131
308/308 - 2s - loss: 1.1577 - val_loss: 0.5795 - 2s/epoch - 5ms/step
Epoch 12/20
Epoch 12: val_loss did not improve from 0.51131
308/308 - 2s - loss: 1.1739 - val_loss: 1.2995 - 2s/epoch - 5ms/step
Epoch 13/20
Epoch 13: val_loss did not improve from 0.51131
308/308 - 2s - loss: 1.1194 - val_loss: 0.7683 - 2s/epoch - 5ms/step
Epoch 14/20
Epoch 14: val_loss did not improve from 0.51131
308/308 - 2s - loss: 1.1474 - val_loss: 0.8144 - 2s/epoch - 5ms/step
Epoch 15/20
Epoch 15: val_loss did not improve from 0.51131
308/308 - 2s - loss: 1.1649 - val_loss: 0.6359 - 2s/epoch - 5ms/step
Epoch 16/20
Epoch 16: val_loss did not improve from 0.51131
308/308 - 2s - loss: 1.1369 - val_loss: 0.9901 - 2s/epoch - 5ms/step
Epoch 17/20
Epoch 17: val_loss improved from 0.51131 to 0.50695, saving model to keras_models/PRSA_data_at_LSTM21_weights.17-0.5070.keras
308/308 - 2s - loss: 1.1472 - val_loss: 0.5070 - 2s/epoch - 5ms/step
Epoch 18/20
Epoch 18: val_loss did not improve from 0.50695
308/308 - 2s - loss: 1.1208 - val_loss: 0.6123 - 2s/epoch - 5ms/step
Epoch 19/20
Epoch 19: val_loss improved from 0.50695 to 0.36699, saving model to keras_models/PRSA_data_at_LSTM21_weights.19-0.3670.keras
308/308 - 2s - loss: 1.1163 - val_loss: 0.3670 - 2s/epoch - 5ms/step
Epoch 20/20
Epoch 20: val_loss did not improve from 0.36699
308/308 - 2s - loss: 1.1058 - val_loss: 0.6575 - 2s/epoch - 5ms/step
El entrenamiento del modelo 1 se ha completado y el historial ha sido guardado en 'history_at_LSTM21_model_0.joblib'.
Epoch 1/20
Epoch 1: val_loss did not improve from 0.36699
308/308 - 3s - loss: 6.5140 - val_loss: 6.2942 - 3s/epoch - 10ms/step
Epoch 2/20
Epoch 2: val_loss did not improve from 0.36699
308/308 - 2s - loss: 1.7843 - val_loss: 2.1396 - 2s/epoch - 5ms/step
Epoch 3/20
Epoch 3: val_loss did not improve from 0.36699
308/308 - 2s - loss: 1.3064 - val_loss: 1.1322 - 2s/epoch - 5ms/step
Epoch 4/20
Epoch 4: val_loss did not improve from 0.36699
308/308 - 2s - loss: 1.2423 - val_loss: 0.7115 - 2s/epoch - 5ms/step
Epoch 5/20
Epoch 5: val_loss did not improve from 0.36699
308/308 - 2s - loss: 1.2003 - val_loss: 0.7027 - 2s/epoch - 5ms/step
Epoch 6/20
Epoch 6: val_loss did not improve from 0.36699
308/308 - 2s - loss: 1.1874 - val_loss: 0.6008 - 2s/epoch - 6ms/step
Epoch 7/20
Epoch 7: val_loss did not improve from 0.36699
308/308 - 2s - loss: 1.2087 - val_loss: 0.5702 - 2s/epoch - 5ms/step
Epoch 8/20
Epoch 8: val_loss did not improve from 0.36699
308/308 - 2s - loss: 1.2003 - val_loss: 0.5639 - 2s/epoch - 5ms/step
Epoch 9/20
Epoch 9: val_loss did not improve from 0.36699
308/308 - 2s - loss: 1.1861 - val_loss: 0.5233 - 2s/epoch - 5ms/step
Epoch 10/20
Epoch 10: val_loss did not improve from 0.36699
308/308 - 2s - loss: 1.1586 - val_loss: 0.4401 - 2s/epoch - 5ms/step
Epoch 11/20
Epoch 11: val_loss did not improve from 0.36699
308/308 - 2s - loss: 1.1475 - val_loss: 0.7885 - 2s/epoch - 5ms/step
Epoch 12/20
Epoch 12: val_loss did not improve from 0.36699
308/308 - 2s - loss: 1.1333 - val_loss: 0.5434 - 2s/epoch - 5ms/step
Epoch 13/20
Epoch 13: val_loss did not improve from 0.36699
308/308 - 2s - loss: 1.1468 - val_loss: 0.4712 - 2s/epoch - 5ms/step
Epoch 14/20
Epoch 14: val_loss did not improve from 0.36699
308/308 - 2s - loss: 1.1431 - val_loss: 0.7115 - 2s/epoch - 5ms/step
Epoch 15/20
Epoch 15: val_loss did not improve from 0.36699
308/308 - 2s - loss: 1.1338 - val_loss: 0.7642 - 2s/epoch - 5ms/step
Epoch 16/20
Epoch 16: val_loss did not improve from 0.36699
308/308 - 2s - loss: 1.1500 - val_loss: 0.4734 - 2s/epoch - 5ms/step
Epoch 17/20
Epoch 17: val_loss did not improve from 0.36699
308/308 - 2s - loss: 1.1356 - val_loss: 0.7827 - 2s/epoch - 5ms/step
Epoch 18/20
Epoch 18: val_loss improved from 0.36699 to 0.35608, saving model to keras_models/PRSA_data_at_LSTM21_weights.18-0.3561.keras
308/308 - 2s - loss: 1.1401 - val_loss: 0.3561 - 2s/epoch - 5ms/step
Epoch 19/20
Epoch 19: val_loss improved from 0.35608 to 0.27443, saving model to keras_models/PRSA_data_at_LSTM21_weights.19-0.2744.keras
308/308 - 2s - loss: 1.1031 - val_loss: 0.2744 - 2s/epoch - 5ms/step
Epoch 20/20
Epoch 20: val_loss improved from 0.27443 to 0.08549, saving model to keras_models/PRSA_data_at_LSTM21_weights.20-0.0855.keras
308/308 - 2s - loss: 1.1006 - val_loss: 0.0855 - 2s/epoch - 5ms/step
El entrenamiento del modelo 2 se ha completado y el historial ha sido guardado en 'history_at_LSTM21_model_1.joblib'.
Epoch 1/20
Epoch 1: val_loss did not improve from 0.08549
308/308 - 3s - loss: 7.1273 - val_loss: 7.1455 - 3s/epoch - 9ms/step
Epoch 2/20
Epoch 2: val_loss did not improve from 0.08549
308/308 - 2s - loss: 2.0265 - val_loss: 2.3703 - 2s/epoch - 5ms/step
Epoch 3/20
Epoch 3: val_loss did not improve from 0.08549
308/308 - 2s - loss: 1.3091 - val_loss: 1.2190 - 2s/epoch - 5ms/step
Epoch 4/20
Epoch 4: val_loss did not improve from 0.08549
308/308 - 2s - loss: 1.2336 - val_loss: 0.9028 - 2s/epoch - 5ms/step
Epoch 5/20
Epoch 5: val_loss did not improve from 0.08549
308/308 - 2s - loss: 1.2280 - val_loss: 0.7407 - 2s/epoch - 5ms/step
Epoch 6/20
Epoch 6: val_loss did not improve from 0.08549
308/308 - 2s - loss: 1.1769 - val_loss: 0.5555 - 2s/epoch - 5ms/step
Epoch 7/20
Epoch 7: val_loss did not improve from 0.08549
308/308 - 2s - loss: 1.1715 - val_loss: 0.6390 - 2s/epoch - 5ms/step
Epoch 8/20
Epoch 8: val_loss did not improve from 0.08549
308/308 - 2s - loss: 1.1738 - val_loss: 0.3321 - 2s/epoch - 5ms/step
Epoch 9/20
Epoch 9: val_loss did not improve from 0.08549
308/308 - 2s - loss: 1.1618 - val_loss: 0.5163 - 2s/epoch - 5ms/step
Epoch 10/20
Epoch 10: val_loss did not improve from 0.08549
308/308 - 2s - loss: 1.1536 - val_loss: 0.8458 - 2s/epoch - 5ms/step
Epoch 11/20
Epoch 11: val_loss did not improve from 0.08549
308/308 - 2s - loss: 1.1752 - val_loss: 0.8811 - 2s/epoch - 5ms/step
Epoch 12/20
Epoch 12: val_loss did not improve from 0.08549
308/308 - 2s - loss: 1.1755 - val_loss: 0.5188 - 2s/epoch - 5ms/step
Epoch 13/20
Epoch 13: val_loss did not improve from 0.08549
308/308 - 2s - loss: 1.1264 - val_loss: 0.7480 - 2s/epoch - 5ms/step
Epoch 14/20
Epoch 14: val_loss did not improve from 0.08549
308/308 - 2s - loss: 1.1268 - val_loss: 0.5403 - 2s/epoch - 5ms/step
Epoch 15/20
Epoch 15: val_loss did not improve from 0.08549
308/308 - 2s - loss: 1.1438 - val_loss: 0.5043 - 2s/epoch - 5ms/step
Epoch 16/20
Epoch 16: val_loss did not improve from 0.08549
308/308 - 2s - loss: 1.1120 - val_loss: 0.5512 - 2s/epoch - 5ms/step
Epoch 17/20
Epoch 17: val_loss did not improve from 0.08549
308/308 - 2s - loss: 1.1013 - val_loss: 0.4645 - 2s/epoch - 5ms/step
Epoch 18/20
Epoch 18: val_loss did not improve from 0.08549
308/308 - 2s - loss: 1.1149 - val_loss: 0.2916 - 2s/epoch - 5ms/step
Epoch 19/20
Epoch 19: val_loss did not improve from 0.08549
308/308 - 2s - loss: 1.1010 - val_loss: 0.5605 - 2s/epoch - 5ms/step
Epoch 20/20
Epoch 20: val_loss did not improve from 0.08549
308/308 - 2s - loss: 1.1159 - val_loss: 0.5083 - 2s/epoch - 5ms/step
El entrenamiento del modelo 3 se ha completado y el historial ha sido guardado en 'history_at_LSTM21_model_2.joblib'.
Epoch 1/20
Epoch 1: val_loss did not improve from 0.08549
308/308 - 3s - loss: 7.0087 - val_loss: 6.9603 - 3s/epoch - 10ms/step
Epoch 2/20
Epoch 2: val_loss did not improve from 0.08549
308/308 - 2s - loss: 1.9782 - val_loss: 2.2822 - 2s/epoch - 5ms/step
Epoch 3/20
Epoch 3: val_loss did not improve from 0.08549
308/308 - 2s - loss: 1.2926 - val_loss: 1.1988 - 2s/epoch - 5ms/step
Epoch 4/20
Epoch 4: val_loss did not improve from 0.08549
308/308 - 2s - loss: 1.2260 - val_loss: 0.7606 - 2s/epoch - 5ms/step
Epoch 5/20
Epoch 5: val_loss did not improve from 0.08549
308/308 - 2s - loss: 1.2119 - val_loss: 0.8530 - 2s/epoch - 5ms/step
Epoch 6/20
Epoch 6: val_loss did not improve from 0.08549
308/308 - 2s - loss: 1.1924 - val_loss: 1.0002 - 2s/epoch - 5ms/step
Epoch 7/20
Epoch 7: val_loss did not improve from 0.08549
308/308 - 2s - loss: 1.1810 - val_loss: 0.4576 - 2s/epoch - 5ms/step
Epoch 8/20
Epoch 8: val_loss did not improve from 0.08549
308/308 - 2s - loss: 1.1771 - val_loss: 0.5263 - 2s/epoch - 5ms/step
Epoch 9/20
Epoch 9: val_loss did not improve from 0.08549
308/308 - 2s - loss: 1.1784 - val_loss: 0.3535 - 2s/epoch - 5ms/step
Epoch 10/20
Epoch 10: val_loss did not improve from 0.08549
308/308 - 2s - loss: 1.1757 - val_loss: 0.5535 - 2s/epoch - 5ms/step
Epoch 11/20
Epoch 11: val_loss did not improve from 0.08549
308/308 - 2s - loss: 1.1266 - val_loss: 0.6348 - 2s/epoch - 5ms/step
Epoch 12/20
Epoch 12: val_loss did not improve from 0.08549
308/308 - 2s - loss: 1.1667 - val_loss: 0.8479 - 2s/epoch - 6ms/step
Epoch 13/20
Epoch 13: val_loss did not improve from 0.08549
308/308 - 2s - loss: 1.1332 - val_loss: 0.7472 - 2s/epoch - 5ms/step
Epoch 14/20
Epoch 14: val_loss did not improve from 0.08549
308/308 - 2s - loss: 1.1678 - val_loss: 0.5896 - 2s/epoch - 5ms/step
Epoch 15/20
Epoch 15: val_loss did not improve from 0.08549
308/308 - 2s - loss: 1.1602 - val_loss: 0.6890 - 2s/epoch - 5ms/step
Epoch 16/20
Epoch 16: val_loss did not improve from 0.08549
308/308 - 2s - loss: 1.1077 - val_loss: 0.9204 - 2s/epoch - 5ms/step
Epoch 17/20
Epoch 17: val_loss did not improve from 0.08549
308/308 - 2s - loss: 1.1387 - val_loss: 0.4964 - 2s/epoch - 5ms/step
Epoch 18/20
Epoch 18: val_loss did not improve from 0.08549
308/308 - 2s - loss: 1.1386 - val_loss: 0.8676 - 2s/epoch - 5ms/step
Epoch 19/20
Epoch 19: val_loss did not improve from 0.08549
308/308 - 2s - loss: 1.1014 - val_loss: 0.6461 - 2s/epoch - 5ms/step
Epoch 20/20
Epoch 20: val_loss did not improve from 0.08549
308/308 - 2s - loss: 1.1263 - val_loss: 0.7324 - 2s/epoch - 5ms/step
El entrenamiento del modelo 4 se ha completado y el historial ha sido guardado en 'history_at_LSTM21_model_3.joblib'.
Epoch 1/20
Epoch 1: val_loss did not improve from 0.08549
308/308 - 3s - loss: 6.4830 - val_loss: 6.3643 - 3s/epoch - 9ms/step
Epoch 2/20
Epoch 2: val_loss did not improve from 0.08549
308/308 - 2s - loss: 1.8060 - val_loss: 2.1596 - 2s/epoch - 5ms/step
Epoch 3/20
Epoch 3: val_loss did not improve from 0.08549
308/308 - 2s - loss: 1.3385 - val_loss: 1.1107 - 2s/epoch - 5ms/step
Epoch 4/20
Epoch 4: val_loss did not improve from 0.08549
308/308 - 2s - loss: 1.2570 - val_loss: 0.7164 - 2s/epoch - 5ms/step
Epoch 5/20
Epoch 5: val_loss did not improve from 0.08549
308/308 - 2s - loss: 1.1985 - val_loss: 0.5929 - 2s/epoch - 5ms/step
Epoch 6/20
Epoch 6: val_loss did not improve from 0.08549
308/308 - 2s - loss: 1.2166 - val_loss: 0.5098 - 2s/epoch - 5ms/step
Epoch 7/20
Epoch 7: val_loss did not improve from 0.08549
308/308 - 1s - loss: 1.1552 - val_loss: 0.8650 - 1s/epoch - 5ms/step
Epoch 8/20
Epoch 8: val_loss did not improve from 0.08549
308/308 - 1s - loss: 1.1715 - val_loss: 1.0648 - 1s/epoch - 5ms/step
Epoch 9/20
Epoch 9: val_loss did not improve from 0.08549
308/308 - 2s - loss: 1.1744 - val_loss: 1.1024 - 2s/epoch - 5ms/step
Epoch 10/20
Epoch 10: val_loss did not improve from 0.08549
308/308 - 2s - loss: 1.1835 - val_loss: 0.5842 - 2s/epoch - 5ms/step
Epoch 11/20
Epoch 11: val_loss did not improve from 0.08549
308/308 - 2s - loss: 1.1741 - val_loss: 0.6538 - 2s/epoch - 6ms/step
Epoch 12/20
Epoch 12: val_loss did not improve from 0.08549
308/308 - 2s - loss: 1.1687 - val_loss: 1.0002 - 2s/epoch - 5ms/step
Epoch 13/20
Epoch 13: val_loss did not improve from 0.08549
308/308 - 2s - loss: 1.1700 - val_loss: 0.6725 - 2s/epoch - 5ms/step
Epoch 14/20
Epoch 14: val_loss did not improve from 0.08549
308/308 - 2s - loss: 1.1428 - val_loss: 0.7281 - 2s/epoch - 5ms/step
Epoch 15/20
Epoch 15: val_loss did not improve from 0.08549
308/308 - 2s - loss: 1.1226 - val_loss: 1.0745 - 2s/epoch - 5ms/step
Epoch 16/20
Epoch 16: val_loss did not improve from 0.08549
308/308 - 2s - loss: 1.1209 - val_loss: 0.7797 - 2s/epoch - 5ms/step
Epoch 17/20
Epoch 17: val_loss did not improve from 0.08549
308/308 - 2s - loss: 1.1310 - val_loss: 0.7071 - 2s/epoch - 5ms/step
Epoch 18/20
Epoch 18: val_loss did not improve from 0.08549
308/308 - 2s - loss: 1.1314 - val_loss: 0.6051 - 2s/epoch - 5ms/step
Epoch 19/20
Epoch 19: val_loss did not improve from 0.08549
308/308 - 2s - loss: 1.1208 - val_loss: 1.1806 - 2s/epoch - 5ms/step
Epoch 20/20
Epoch 20: val_loss did not improve from 0.08549
308/308 - 2s - loss: 1.0763 - val_loss: 0.4662 - 2s/epoch - 5ms/step
El entrenamiento del modelo 5 se ha completado y el historial ha sido guardado en 'history_at_LSTM21_model_4.joblib'.
Epoch 1/20
Epoch 1: val_loss did not improve from 0.08549
308/308 - 3s - loss: 6.6569 - val_loss: 6.6690 - 3s/epoch - 9ms/step
Epoch 2/20
Epoch 2: val_loss did not improve from 0.08549
308/308 - 2s - loss: 1.9327 - val_loss: 2.2156 - 2s/epoch - 5ms/step
Epoch 3/20
Epoch 3: val_loss did not improve from 0.08549
308/308 - 2s - loss: 1.3026 - val_loss: 1.1370 - 2s/epoch - 5ms/step
Epoch 4/20
Epoch 4: val_loss did not improve from 0.08549
308/308 - 2s - loss: 1.2377 - val_loss: 0.8163 - 2s/epoch - 5ms/step
Epoch 5/20
Epoch 5: val_loss did not improve from 0.08549
308/308 - 2s - loss: 1.2043 - val_loss: 0.8028 - 2s/epoch - 5ms/step
Epoch 6/20
Epoch 6: val_loss did not improve from 0.08549
308/308 - 2s - loss: 1.1973 - val_loss: 0.9457 - 2s/epoch - 5ms/step
Epoch 7/20
Epoch 7: val_loss did not improve from 0.08549
308/308 - 2s - loss: 1.1938 - val_loss: 0.8411 - 2s/epoch - 5ms/step
Epoch 8/20
Epoch 8: val_loss did not improve from 0.08549
308/308 - 2s - loss: 1.1815 - val_loss: 0.6121 - 2s/epoch - 5ms/step
Epoch 9/20
Epoch 9: val_loss did not improve from 0.08549
308/308 - 2s - loss: 1.1657 - val_loss: 0.5641 - 2s/epoch - 5ms/step
Epoch 10/20
Epoch 10: val_loss did not improve from 0.08549
308/308 - 2s - loss: 1.1545 - val_loss: 0.5236 - 2s/epoch - 5ms/step
Epoch 11/20
Epoch 11: val_loss did not improve from 0.08549
308/308 - 2s - loss: 1.1718 - val_loss: 1.3807 - 2s/epoch - 5ms/step
Epoch 12/20
Epoch 12: val_loss did not improve from 0.08549
308/308 - 2s - loss: 1.1810 - val_loss: 0.7500 - 2s/epoch - 5ms/step
Epoch 13/20
Epoch 13: val_loss did not improve from 0.08549
308/308 - 1s - loss: 1.1771 - val_loss: 0.6484 - 1s/epoch - 5ms/step
Epoch 14/20
Epoch 14: val_loss did not improve from 0.08549
308/308 - 1s - loss: 1.1538 - val_loss: 0.9089 - 1s/epoch - 5ms/step
Epoch 15/20
Epoch 15: val_loss did not improve from 0.08549
308/308 - 2s - loss: 1.1299 - val_loss: 0.4954 - 2s/epoch - 5ms/step
Epoch 16/20
Epoch 16: val_loss did not improve from 0.08549
308/308 - 1s - loss: 1.1634 - val_loss: 0.6839 - 1s/epoch - 5ms/step
Epoch 17/20
Epoch 17: val_loss did not improve from 0.08549
308/308 - 2s - loss: 1.1507 - val_loss: 0.3773 - 2s/epoch - 5ms/step
Epoch 18/20
Epoch 18: val_loss did not improve from 0.08549
308/308 - 2s - loss: 1.1268 - val_loss: 0.5883 - 2s/epoch - 5ms/step
Epoch 19/20
Epoch 19: val_loss did not improve from 0.08549
308/308 - 2s - loss: 1.1539 - val_loss: 0.5794 - 2s/epoch - 5ms/step
Epoch 20/20
Epoch 20: val_loss did not improve from 0.08549
308/308 - 2s - loss: 1.1206 - val_loss: 0.2575 - 2s/epoch - 5ms/step
El entrenamiento del modelo 6 se ha completado y el historial ha sido guardado en 'history_at_LSTM21_model_5.joblib'.
Epoch 1/20
Epoch 1: val_loss did not improve from 0.08549
308/308 - 3s - loss: 7.3752 - val_loss: 7.3926 - 3s/epoch - 10ms/step
Epoch 2/20
Epoch 2: val_loss did not improve from 0.08549
308/308 - 1s - loss: 2.0334 - val_loss: 2.2926 - 1s/epoch - 5ms/step
Epoch 3/20
Epoch 3: val_loss did not improve from 0.08549
308/308 - 1s - loss: 1.3247 - val_loss: 1.3193 - 1s/epoch - 5ms/step
Epoch 4/20
Epoch 4: val_loss did not improve from 0.08549
308/308 - 2s - loss: 1.2560 - val_loss: 0.9270 - 2s/epoch - 5ms/step
Epoch 5/20
Epoch 5: val_loss did not improve from 0.08549
308/308 - 1s - loss: 1.2517 - val_loss: 0.9133 - 1s/epoch - 5ms/step
Epoch 6/20
Epoch 6: val_loss did not improve from 0.08549
308/308 - 1s - loss: 1.2063 - val_loss: 0.6316 - 1s/epoch - 5ms/step
Epoch 7/20
Epoch 7: val_loss did not improve from 0.08549
308/308 - 1s - loss: 1.2130 - val_loss: 0.6445 - 1s/epoch - 5ms/step
Epoch 8/20
Epoch 8: val_loss did not improve from 0.08549
308/308 - 2s - loss: 1.1898 - val_loss: 0.7302 - 2s/epoch - 5ms/step
Epoch 9/20
Epoch 9: val_loss did not improve from 0.08549
308/308 - 1s - loss: 1.1717 - val_loss: 0.4348 - 1s/epoch - 5ms/step
Epoch 10/20
Epoch 10: val_loss did not improve from 0.08549
308/308 - 1s - loss: 1.1601 - val_loss: 0.6343 - 1s/epoch - 5ms/step
Epoch 11/20
Epoch 11: val_loss did not improve from 0.08549
308/308 - 1s - loss: 1.1732 - val_loss: 0.8842 - 1s/epoch - 5ms/step
Epoch 12/20
Epoch 12: val_loss did not improve from 0.08549
308/308 - 1s - loss: 1.1701 - val_loss: 0.7904 - 1s/epoch - 5ms/step
Epoch 13/20
Epoch 13: val_loss did not improve from 0.08549
308/308 - 1s - loss: 1.1357 - val_loss: 0.9920 - 1s/epoch - 5ms/step
Epoch 14/20
Epoch 14: val_loss did not improve from 0.08549
308/308 - 1s - loss: 1.1508 - val_loss: 0.2475 - 1s/epoch - 5ms/step
Epoch 15/20
Epoch 15: val_loss did not improve from 0.08549
308/308 - 2s - loss: 1.0953 - val_loss: 0.6600 - 2s/epoch - 5ms/step
Epoch 16/20
Epoch 16: val_loss did not improve from 0.08549
308/308 - 1s - loss: 1.1256 - val_loss: 0.5158 - 1s/epoch - 5ms/step
Epoch 17/20
Epoch 17: val_loss did not improve from 0.08549
308/308 - 1s - loss: 1.1301 - val_loss: 0.6579 - 1s/epoch - 5ms/step
Epoch 18/20
Epoch 18: val_loss did not improve from 0.08549
308/308 - 1s - loss: 1.1101 - val_loss: 0.6466 - 1s/epoch - 5ms/step
Epoch 19/20
Epoch 19: val_loss did not improve from 0.08549
308/308 - 2s - loss: 1.1087 - val_loss: 0.3911 - 2s/epoch - 5ms/step
Epoch 20/20
Epoch 20: val_loss did not improve from 0.08549
308/308 - 1s - loss: 1.0952 - val_loss: 0.5079 - 1s/epoch - 5ms/step
El entrenamiento del modelo 7 se ha completado y el historial ha sido guardado en 'history_at_LSTM21_model_6.joblib'.
Epoch 1/20
Epoch 1: val_loss did not improve from 0.08549
308/308 - 3s - loss: 7.3893 - val_loss: 7.2815 - 3s/epoch - 9ms/step
Epoch 2/20
Epoch 2: val_loss did not improve from 0.08549
308/308 - 1s - loss: 2.1168 - val_loss: 2.5884 - 1s/epoch - 5ms/step
Epoch 3/20
Epoch 3: val_loss did not improve from 0.08549
308/308 - 1s - loss: 1.3322 - val_loss: 1.2033 - 1s/epoch - 5ms/step
Epoch 4/20
Epoch 4: val_loss did not improve from 0.08549
308/308 - 2s - loss: 1.2706 - val_loss: 0.8692 - 2s/epoch - 5ms/step
Epoch 5/20
Epoch 5: val_loss did not improve from 0.08549
308/308 - 2s - loss: 1.2356 - val_loss: 1.5150 - 2s/epoch - 5ms/step
Epoch 6/20
Epoch 6: val_loss did not improve from 0.08549
308/308 - 1s - loss: 1.2095 - val_loss: 0.7358 - 1s/epoch - 5ms/step
Epoch 7/20
Epoch 7: val_loss did not improve from 0.08549
308/308 - 1s - loss: 1.1948 - val_loss: 0.8825 - 1s/epoch - 5ms/step
Epoch 8/20
Epoch 8: val_loss did not improve from 0.08549
308/308 - 1s - loss: 1.1588 - val_loss: 0.7367 - 1s/epoch - 5ms/step
Epoch 9/20
Epoch 9: val_loss did not improve from 0.08549
308/308 - 1s - loss: 1.1636 - val_loss: 0.5344 - 1s/epoch - 5ms/step
Epoch 10/20
Epoch 10: val_loss did not improve from 0.08549
308/308 - 1s - loss: 1.1819 - val_loss: 0.8698 - 1s/epoch - 5ms/step
Epoch 11/20
Epoch 11: val_loss did not improve from 0.08549
308/308 - 1s - loss: 1.1549 - val_loss: 0.6299 - 1s/epoch - 5ms/step
Epoch 12/20
Epoch 12: val_loss did not improve from 0.08549
308/308 - 1s - loss: 1.1474 - val_loss: 0.7311 - 1s/epoch - 5ms/step
Epoch 13/20
Epoch 13: val_loss did not improve from 0.08549
308/308 - 1s - loss: 1.1419 - val_loss: 0.4687 - 1s/epoch - 5ms/step
Epoch 14/20
Epoch 14: val_loss did not improve from 0.08549
308/308 - 1s - loss: 1.1122 - val_loss: 0.8526 - 1s/epoch - 5ms/step
Epoch 15/20
Epoch 15: val_loss did not improve from 0.08549
308/308 - 1s - loss: 1.1377 - val_loss: 0.8779 - 1s/epoch - 5ms/step
Epoch 16/20
Epoch 16: val_loss did not improve from 0.08549
308/308 - 1s - loss: 1.1421 - val_loss: 0.7008 - 1s/epoch - 5ms/step
Epoch 17/20
Epoch 17: val_loss did not improve from 0.08549
308/308 - 1s - loss: 1.1193 - val_loss: 0.6024 - 1s/epoch - 5ms/step
Epoch 18/20
Epoch 18: val_loss did not improve from 0.08549
308/308 - 1s - loss: 1.1349 - val_loss: 0.6465 - 1s/epoch - 5ms/step
Epoch 19/20
Epoch 19: val_loss did not improve from 0.08549
308/308 - 1s - loss: 1.1211 - val_loss: 0.5281 - 1s/epoch - 5ms/step
Epoch 20/20
Epoch 20: val_loss did not improve from 0.08549
308/308 - 1s - loss: 1.1083 - val_loss: 0.6049 - 1s/epoch - 5ms/step
El entrenamiento del modelo 8 se ha completado y el historial ha sido guardado en 'history_at_LSTM21_model_7.joblib'.
Epoch 1/20
Epoch 1: val_loss did not improve from 0.08549
308/308 - 3s - loss: 7.1502 - val_loss: 7.2204 - 3s/epoch - 9ms/step
Epoch 2/20
Epoch 2: val_loss did not improve from 0.08549
308/308 - 1s - loss: 2.0721 - val_loss: 2.4275 - 1s/epoch - 5ms/step
Epoch 3/20
Epoch 3: val_loss did not improve from 0.08549
308/308 - 1s - loss: 1.3561 - val_loss: 1.2339 - 1s/epoch - 5ms/step
Epoch 4/20
Epoch 4: val_loss did not improve from 0.08549
308/308 - 2s - loss: 1.2254 - val_loss: 0.7266 - 2s/epoch - 5ms/step
Epoch 5/20
Epoch 5: val_loss did not improve from 0.08549
308/308 - 2s - loss: 1.2219 - val_loss: 0.5660 - 2s/epoch - 5ms/step
Epoch 6/20
Epoch 6: val_loss did not improve from 0.08549
308/308 - 1s - loss: 1.2153 - val_loss: 0.9443 - 1s/epoch - 5ms/step
Epoch 7/20
Epoch 7: val_loss did not improve from 0.08549
308/308 - 1s - loss: 1.1902 - val_loss: 0.5496 - 1s/epoch - 5ms/step
Epoch 8/20
Epoch 8: val_loss did not improve from 0.08549
308/308 - 1s - loss: 1.1760 - val_loss: 0.5335 - 1s/epoch - 5ms/step
Epoch 9/20
Epoch 9: val_loss did not improve from 0.08549
308/308 - 1s - loss: 1.1713 - val_loss: 1.0479 - 1s/epoch - 5ms/step
Epoch 10/20
Epoch 10: val_loss did not improve from 0.08549
308/308 - 1s - loss: 1.1544 - val_loss: 0.6174 - 1s/epoch - 5ms/step
Epoch 11/20
Epoch 11: val_loss did not improve from 0.08549
308/308 - 1s - loss: 1.1845 - val_loss: 0.6813 - 1s/epoch - 5ms/step
Epoch 12/20
Epoch 12: val_loss did not improve from 0.08549
308/308 - 1s - loss: 1.1421 - val_loss: 0.5685 - 1s/epoch - 5ms/step
Epoch 13/20
Epoch 13: val_loss did not improve from 0.08549
308/308 - 1s - loss: 1.1335 - val_loss: 0.4277 - 1s/epoch - 5ms/step
Epoch 14/20
Epoch 14: val_loss did not improve from 0.08549
308/308 - 1s - loss: 1.1369 - val_loss: 0.6532 - 1s/epoch - 5ms/step
Epoch 15/20
Epoch 15: val_loss did not improve from 0.08549
308/308 - 1s - loss: 1.1204 - val_loss: 0.4084 - 1s/epoch - 5ms/step
Epoch 16/20
Epoch 16: val_loss did not improve from 0.08549
308/308 - 1s - loss: 1.1127 - val_loss: 0.5608 - 1s/epoch - 5ms/step
Epoch 17/20
Epoch 17: val_loss did not improve from 0.08549
308/308 - 1s - loss: 1.1224 - val_loss: 0.1838 - 1s/epoch - 5ms/step
Epoch 18/20
Epoch 18: val_loss did not improve from 0.08549
308/308 - 1s - loss: 1.1108 - val_loss: 0.3477 - 1s/epoch - 5ms/step
Epoch 19/20
Epoch 19: val_loss did not improve from 0.08549
308/308 - 1s - loss: 1.1073 - val_loss: 0.5594 - 1s/epoch - 5ms/step
Epoch 20/20
Epoch 20: val_loss did not improve from 0.08549
308/308 - 1s - loss: 1.1275 - val_loss: 0.4608 - 1s/epoch - 5ms/step
El entrenamiento del modelo 9 se ha completado y el historial ha sido guardado en 'history_at_LSTM21_model_8.joblib'.
Epoch 1/20
Epoch 1: val_loss did not improve from 0.08549
308/308 - 3s - loss: 7.1617 - val_loss: 6.9325 - 3s/epoch - 8ms/step
Epoch 2/20
Epoch 2: val_loss did not improve from 0.08549
308/308 - 1s - loss: 1.9359 - val_loss: 2.2067 - 1s/epoch - 5ms/step
Epoch 3/20
Epoch 3: val_loss did not improve from 0.08549
308/308 - 1s - loss: 1.3322 - val_loss: 1.3082 - 1s/epoch - 5ms/step
Epoch 4/20
Epoch 4: val_loss did not improve from 0.08549
308/308 - 1s - loss: 1.2563 - val_loss: 1.2014 - 1s/epoch - 5ms/step
Epoch 5/20
Epoch 5: val_loss did not improve from 0.08549
308/308 - 1s - loss: 1.1969 - val_loss: 0.7588 - 1s/epoch - 5ms/step
Epoch 6/20
Epoch 6: val_loss did not improve from 0.08549
308/308 - 1s - loss: 1.2027 - val_loss: 0.4846 - 1s/epoch - 5ms/step
Epoch 7/20
Epoch 7: val_loss did not improve from 0.08549
308/308 - 1s - loss: 1.1781 - val_loss: 0.9768 - 1s/epoch - 5ms/step
Epoch 8/20
Epoch 8: val_loss did not improve from 0.08549
308/308 - 1s - loss: 1.1764 - val_loss: 0.5495 - 1s/epoch - 5ms/step
Epoch 9/20
Epoch 9: val_loss did not improve from 0.08549
308/308 - 1s - loss: 1.1699 - val_loss: 0.4125 - 1s/epoch - 5ms/step
Epoch 10/20
Epoch 10: val_loss did not improve from 0.08549
308/308 - 1s - loss: 1.1489 - val_loss: 0.6920 - 1s/epoch - 5ms/step
Epoch 11/20
Epoch 11: val_loss did not improve from 0.08549
308/308 - 1s - loss: 1.1632 - val_loss: 0.5595 - 1s/epoch - 5ms/step
Epoch 12/20
Epoch 12: val_loss did not improve from 0.08549
308/308 - 1s - loss: 1.1347 - val_loss: 0.7443 - 1s/epoch - 5ms/step
Epoch 13/20
Epoch 13: val_loss did not improve from 0.08549
308/308 - 1s - loss: 1.1108 - val_loss: 0.6382 - 1s/epoch - 5ms/step
Epoch 14/20
Epoch 14: val_loss did not improve from 0.08549
308/308 - 1s - loss: 1.1224 - val_loss: 0.8785 - 1s/epoch - 5ms/step
Epoch 15/20
Epoch 15: val_loss did not improve from 0.08549
308/308 - 1s - loss: 1.1219 - val_loss: 0.4760 - 1s/epoch - 5ms/step
Epoch 16/20
Epoch 16: val_loss did not improve from 0.08549
308/308 - 1s - loss: 1.0917 - val_loss: 0.8484 - 1s/epoch - 5ms/step
Epoch 17/20
Epoch 17: val_loss did not improve from 0.08549
308/308 - 1s - loss: 1.1569 - val_loss: 0.5316 - 1s/epoch - 5ms/step
Epoch 18/20
Epoch 18: val_loss did not improve from 0.08549
308/308 - 1s - loss: 1.1098 - val_loss: 0.8112 - 1s/epoch - 5ms/step
Epoch 19/20
Epoch 19: val_loss did not improve from 0.08549
308/308 - 1s - loss: 1.0903 - val_loss: 0.8627 - 1s/epoch - 5ms/step
Epoch 20/20
Epoch 20: val_loss did not improve from 0.08549
308/308 - 1s - loss: 1.1383 - val_loss: 0.6877 - 1s/epoch - 5ms/step
El entrenamiento del modelo 10 se ha completado y el historial ha sido guardado en 'history_at_LSTM21_model_9.joblib'.
Epoch 1/20
Epoch 1: val_loss did not improve from 0.08549
308/308 - 3s - loss: 7.0648 - val_loss: 7.2272 - 3s/epoch - 8ms/step
Epoch 2/20
Epoch 2: val_loss did not improve from 0.08549
308/308 - 1s - loss: 2.0247 - val_loss: 2.2883 - 1s/epoch - 5ms/step
Epoch 3/20
Epoch 3: val_loss did not improve from 0.08549
308/308 - 1s - loss: 1.3395 - val_loss: 1.1504 - 1s/epoch - 5ms/step
Epoch 4/20
Epoch 4: val_loss did not improve from 0.08549
308/308 - 2s - loss: 1.2341 - val_loss: 0.9260 - 2s/epoch - 5ms/step
Epoch 5/20
Epoch 5: val_loss did not improve from 0.08549
308/308 - 1s - loss: 1.2241 - val_loss: 0.7659 - 1s/epoch - 5ms/step
Epoch 6/20
Epoch 6: val_loss did not improve from 0.08549
308/308 - 1s - loss: 1.2065 - val_loss: 0.9182 - 1s/epoch - 5ms/step
Epoch 7/20
Epoch 7: val_loss did not improve from 0.08549
308/308 - 1s - loss: 1.2092 - val_loss: 0.5597 - 1s/epoch - 5ms/step
Epoch 8/20
Epoch 8: val_loss did not improve from 0.08549
308/308 - 2s - loss: 1.1988 - val_loss: 0.4764 - 2s/epoch - 5ms/step
Epoch 9/20
Epoch 9: val_loss did not improve from 0.08549
308/308 - 1s - loss: 1.1697 - val_loss: 0.5353 - 1s/epoch - 5ms/step
Epoch 10/20
Epoch 10: val_loss did not improve from 0.08549
308/308 - 1s - loss: 1.1895 - val_loss: 0.4775 - 1s/epoch - 5ms/step
Epoch 11/20
Epoch 11: val_loss did not improve from 0.08549
308/308 - 1s - loss: 1.1855 - val_loss: 0.8048 - 1s/epoch - 5ms/step
Epoch 12/20
Epoch 12: val_loss did not improve from 0.08549
308/308 - 1s - loss: 1.1517 - val_loss: 0.6131 - 1s/epoch - 5ms/step
Epoch 13/20
Epoch 13: val_loss did not improve from 0.08549
308/308 - 1s - loss: 1.1327 - val_loss: 0.7125 - 1s/epoch - 5ms/step
Epoch 14/20
Epoch 14: val_loss did not improve from 0.08549
308/308 - 1s - loss: 1.1390 - val_loss: 0.6390 - 1s/epoch - 5ms/step
Epoch 15/20
Epoch 15: val_loss did not improve from 0.08549
308/308 - 1s - loss: 1.1447 - val_loss: 0.4719 - 1s/epoch - 5ms/step
Epoch 16/20
Epoch 16: val_loss did not improve from 0.08549
308/308 - 1s - loss: 1.1640 - val_loss: 0.7268 - 1s/epoch - 5ms/step
Epoch 17/20
Epoch 17: val_loss did not improve from 0.08549
308/308 - 1s - loss: 1.1282 - val_loss: 0.7805 - 1s/epoch - 5ms/step
Epoch 18/20
Epoch 18: val_loss did not improve from 0.08549
308/308 - 1s - loss: 1.1411 - val_loss: 0.7617 - 1s/epoch - 5ms/step
Epoch 19/20
Epoch 19: val_loss did not improve from 0.08549
308/308 - 2s - loss: 1.1395 - val_loss: 0.4665 - 2s/epoch - 5ms/step
Epoch 20/20
Epoch 20: val_loss did not improve from 0.08549
308/308 - 1s - loss: 1.1086 - val_loss: 0.5491 - 1s/epoch - 5ms/step
El entrenamiento del modelo 11 se ha completado y el historial ha sido guardado en 'history_at_LSTM21_model_10.joblib'.
Epoch 1/20
Epoch 1: val_loss did not improve from 0.08549
308/308 - 3s - loss: 6.6424 - val_loss: 6.4730 - 3s/epoch - 10ms/step
Epoch 2/20
Epoch 2: val_loss did not improve from 0.08549
308/308 - 2s - loss: 1.8545 - val_loss: 2.1252 - 2s/epoch - 5ms/step
Epoch 3/20
Epoch 3: val_loss did not improve from 0.08549
308/308 - 2s - loss: 1.3603 - val_loss: 1.1937 - 2s/epoch - 5ms/step
Epoch 4/20
Epoch 4: val_loss did not improve from 0.08549
308/308 - 2s - loss: 1.2359 - val_loss: 1.1290 - 2s/epoch - 5ms/step
Epoch 5/20
Epoch 5: val_loss did not improve from 0.08549
308/308 - 2s - loss: 1.2234 - val_loss: 0.8825 - 2s/epoch - 5ms/step
Epoch 6/20
Epoch 6: val_loss did not improve from 0.08549
308/308 - 2s - loss: 1.1891 - val_loss: 0.6902 - 2s/epoch - 5ms/step
Epoch 7/20
Epoch 7: val_loss did not improve from 0.08549
308/308 - 2s - loss: 1.2039 - val_loss: 0.6463 - 2s/epoch - 5ms/step
Epoch 8/20
Epoch 8: val_loss did not improve from 0.08549
308/308 - 2s - loss: 1.1778 - val_loss: 0.7837 - 2s/epoch - 5ms/step
Epoch 9/20
Epoch 9: val_loss did not improve from 0.08549
308/308 - 2s - loss: 1.1967 - val_loss: 0.7656 - 2s/epoch - 5ms/step
Epoch 10/20
Epoch 10: val_loss did not improve from 0.08549
308/308 - 2s - loss: 1.1823 - val_loss: 0.5431 - 2s/epoch - 5ms/step
Epoch 11/20
Epoch 11: val_loss did not improve from 0.08549
308/308 - 2s - loss: 1.1722 - val_loss: 0.5096 - 2s/epoch - 5ms/step
Epoch 12/20
Epoch 12: val_loss did not improve from 0.08549
308/308 - 2s - loss: 1.1575 - val_loss: 0.6466 - 2s/epoch - 5ms/step
Epoch 13/20
Epoch 13: val_loss did not improve from 0.08549
308/308 - 2s - loss: 1.1650 - val_loss: 0.5429 - 2s/epoch - 5ms/step
Epoch 14/20
Epoch 14: val_loss did not improve from 0.08549
308/308 - 2s - loss: 1.1426 - val_loss: 0.6024 - 2s/epoch - 5ms/step
Epoch 15/20
Epoch 15: val_loss did not improve from 0.08549
308/308 - 2s - loss: 1.1428 - val_loss: 0.7238 - 2s/epoch - 5ms/step
Epoch 16/20
Epoch 16: val_loss did not improve from 0.08549
308/308 - 2s - loss: 1.1442 - val_loss: 0.7417 - 2s/epoch - 5ms/step
Epoch 17/20
Epoch 17: val_loss did not improve from 0.08549
308/308 - 2s - loss: 1.1202 - val_loss: 0.5819 - 2s/epoch - 5ms/step
Epoch 18/20
Epoch 18: val_loss did not improve from 0.08549
308/308 - 2s - loss: 1.1190 - val_loss: 0.7057 - 2s/epoch - 5ms/step
Epoch 19/20
Epoch 19: val_loss did not improve from 0.08549
308/308 - 2s - loss: 1.1044 - val_loss: 0.9303 - 2s/epoch - 5ms/step
Epoch 20/20
Epoch 20: val_loss did not improve from 0.08549
308/308 - 2s - loss: 1.1078 - val_loss: 0.5596 - 2s/epoch - 5ms/step
El entrenamiento del modelo 12 se ha completado y el historial ha sido guardado en 'history_at_LSTM21_model_11.joblib'.
Epoch 1/20
Epoch 1: val_loss did not improve from 0.08549
308/308 - 3s - loss: 6.1689 - val_loss: 5.8842 - 3s/epoch - 9ms/step
Epoch 2/20
Epoch 2: val_loss did not improve from 0.08549
308/308 - 2s - loss: 1.7807 - val_loss: 2.0410 - 2s/epoch - 5ms/step
Epoch 3/20
Epoch 3: val_loss did not improve from 0.08549
308/308 - 2s - loss: 1.3191 - val_loss: 1.1582 - 2s/epoch - 5ms/step
Epoch 4/20
Epoch 4: val_loss did not improve from 0.08549
308/308 - 2s - loss: 1.2937 - val_loss: 0.8128 - 2s/epoch - 5ms/step
Epoch 5/20
Epoch 5: val_loss did not improve from 0.08549
308/308 - 2s - loss: 1.2376 - val_loss: 0.9585 - 2s/epoch - 5ms/step
Epoch 6/20
Epoch 6: val_loss did not improve from 0.08549
308/308 - 2s - loss: 1.2428 - val_loss: 0.8703 - 2s/epoch - 5ms/step
Epoch 7/20
Epoch 7: val_loss did not improve from 0.08549
308/308 - 2s - loss: 1.1970 - val_loss: 0.6418 - 2s/epoch - 5ms/step
Epoch 8/20
Epoch 8: val_loss did not improve from 0.08549
308/308 - 2s - loss: 1.1885 - val_loss: 0.6698 - 2s/epoch - 5ms/step
Epoch 9/20
Epoch 9: val_loss did not improve from 0.08549
308/308 - 2s - loss: 1.1340 - val_loss: 0.9150 - 2s/epoch - 5ms/step
Epoch 10/20
Epoch 10: val_loss did not improve from 0.08549
308/308 - 2s - loss: 1.2063 - val_loss: 1.0338 - 2s/epoch - 5ms/step
Epoch 11/20
Epoch 11: val_loss did not improve from 0.08549
308/308 - 1s - loss: 1.1572 - val_loss: 0.3219 - 1s/epoch - 5ms/step
Epoch 12/20
Epoch 12: val_loss did not improve from 0.08549
308/308 - 2s - loss: 1.1576 - val_loss: 0.7655 - 2s/epoch - 5ms/step
Epoch 13/20
Epoch 13: val_loss did not improve from 0.08549
308/308 - 2s - loss: 1.1407 - val_loss: 0.6427 - 2s/epoch - 5ms/step
Epoch 14/20
Epoch 14: val_loss did not improve from 0.08549
308/308 - 2s - loss: 1.1637 - val_loss: 0.6377 - 2s/epoch - 5ms/step
Epoch 15/20
Epoch 15: val_loss did not improve from 0.08549
308/308 - 2s - loss: 1.1179 - val_loss: 0.7854 - 2s/epoch - 5ms/step
Epoch 16/20
Epoch 16: val_loss did not improve from 0.08549
308/308 - 2s - loss: 1.1449 - val_loss: 0.6279 - 2s/epoch - 5ms/step
Epoch 17/20
Epoch 17: val_loss did not improve from 0.08549
308/308 - 2s - loss: 1.1248 - val_loss: 0.5951 - 2s/epoch - 5ms/step
Epoch 18/20
Epoch 18: val_loss did not improve from 0.08549
308/308 - 1s - loss: 1.1260 - val_loss: 0.7793 - 1s/epoch - 5ms/step
Epoch 19/20
Epoch 19: val_loss did not improve from 0.08549
308/308 - 2s - loss: 1.1138 - val_loss: 0.6069 - 2s/epoch - 5ms/step
Epoch 20/20
Epoch 20: val_loss did not improve from 0.08549
308/308 - 2s - loss: 1.0981 - val_loss: 0.5909 - 2s/epoch - 5ms/step
El entrenamiento del modelo 13 se ha completado y el historial ha sido guardado en 'history_at_LSTM21_model_12.joblib'.
Epoch 1/20
Epoch 1: val_loss did not improve from 0.08549
308/308 - 3s - loss: 6.2981 - val_loss: 6.0612 - 3s/epoch - 9ms/step
Epoch 2/20
Epoch 2: val_loss did not improve from 0.08549
308/308 - 2s - loss: 1.7782 - val_loss: 2.0103 - 2s/epoch - 5ms/step
Epoch 3/20
Epoch 3: val_loss did not improve from 0.08549
308/308 - 2s - loss: 1.3362 - val_loss: 1.0408 - 2s/epoch - 5ms/step
Epoch 4/20
Epoch 4: val_loss did not improve from 0.08549
308/308 - 2s - loss: 1.2361 - val_loss: 0.7050 - 2s/epoch - 5ms/step
Epoch 5/20
Epoch 5: val_loss did not improve from 0.08549
308/308 - 2s - loss: 1.2603 - val_loss: 0.6949 - 2s/epoch - 5ms/step
Epoch 6/20
Epoch 6: val_loss did not improve from 0.08549
308/308 - 2s - loss: 1.2123 - val_loss: 0.6218 - 2s/epoch - 5ms/step
Epoch 7/20
Epoch 7: val_loss did not improve from 0.08549
308/308 - 2s - loss: 1.1753 - val_loss: 0.6270 - 2s/epoch - 5ms/step
Epoch 8/20
Epoch 8: val_loss did not improve from 0.08549
308/308 - 2s - loss: 1.1761 - val_loss: 0.5216 - 2s/epoch - 5ms/step
Epoch 9/20
Epoch 9: val_loss did not improve from 0.08549
308/308 - 2s - loss: 1.1819 - val_loss: 0.9038 - 2s/epoch - 5ms/step
Epoch 10/20
Epoch 10: val_loss did not improve from 0.08549
308/308 - 2s - loss: 1.1672 - val_loss: 0.7372 - 2s/epoch - 5ms/step
Epoch 11/20
Epoch 11: val_loss did not improve from 0.08549
308/308 - 2s - loss: 1.1724 - val_loss: 0.8152 - 2s/epoch - 5ms/step
Epoch 12/20
Epoch 12: val_loss did not improve from 0.08549
308/308 - 2s - loss: 1.1611 - val_loss: 0.6398 - 2s/epoch - 5ms/step
Epoch 13/20
Epoch 13: val_loss did not improve from 0.08549
308/308 - 1s - loss: 1.1543 - val_loss: 0.5062 - 1s/epoch - 5ms/step
Epoch 14/20
Epoch 14: val_loss did not improve from 0.08549
308/308 - 1s - loss: 1.1744 - val_loss: 0.5168 - 1s/epoch - 5ms/step
Epoch 15/20
Epoch 15: val_loss did not improve from 0.08549
308/308 - 1s - loss: 1.1340 - val_loss: 0.7132 - 1s/epoch - 5ms/step
Epoch 16/20
Epoch 16: val_loss did not improve from 0.08549
308/308 - 1s - loss: 1.1403 - val_loss: 0.3602 - 1s/epoch - 5ms/step
Epoch 17/20
Epoch 17: val_loss did not improve from 0.08549
308/308 - 1s - loss: 1.1314 - val_loss: 0.8002 - 1s/epoch - 5ms/step
Epoch 18/20
Epoch 18: val_loss did not improve from 0.08549
308/308 - 1s - loss: 1.1321 - val_loss: 0.7715 - 1s/epoch - 5ms/step
Epoch 19/20
Epoch 19: val_loss did not improve from 0.08549
308/308 - 2s - loss: 1.1485 - val_loss: 0.4892 - 2s/epoch - 5ms/step
Epoch 20/20
Epoch 20: val_loss did not improve from 0.08549
308/308 - 1s - loss: 1.1285 - val_loss: 0.6801 - 1s/epoch - 5ms/step
El entrenamiento del modelo 14 se ha completado y el historial ha sido guardado en 'history_at_LSTM21_model_13.joblib'.
Epoch 1/20
Epoch 1: val_loss did not improve from 0.08549
308/308 - 3s - loss: 6.9687 - val_loss: 7.1083 - 3s/epoch - 9ms/step
Epoch 2/20
Epoch 2: val_loss did not improve from 0.08549
308/308 - 2s - loss: 2.0277 - val_loss: 2.3601 - 2s/epoch - 5ms/step
Epoch 3/20
Epoch 3: val_loss did not improve from 0.08549
308/308 - 1s - loss: 1.2988 - val_loss: 1.0499 - 1s/epoch - 5ms/step
Epoch 4/20
Epoch 4: val_loss did not improve from 0.08549
308/308 - 2s - loss: 1.2118 - val_loss: 0.7885 - 2s/epoch - 5ms/step
Epoch 5/20
Epoch 5: val_loss did not improve from 0.08549
308/308 - 2s - loss: 1.2079 - val_loss: 0.9392 - 2s/epoch - 5ms/step
Epoch 6/20
Epoch 6: val_loss did not improve from 0.08549
308/308 - 2s - loss: 1.2041 - val_loss: 0.7085 - 2s/epoch - 5ms/step
Epoch 7/20
Epoch 7: val_loss did not improve from 0.08549
308/308 - 2s - loss: 1.2005 - val_loss: 0.5636 - 2s/epoch - 5ms/step
Epoch 8/20
Epoch 8: val_loss did not improve from 0.08549
308/308 - 1s - loss: 1.1827 - val_loss: 0.8485 - 1s/epoch - 5ms/step
Epoch 9/20
Epoch 9: val_loss did not improve from 0.08549
308/308 - 2s - loss: 1.1776 - val_loss: 0.3981 - 2s/epoch - 5ms/step
Epoch 10/20
Epoch 10: val_loss did not improve from 0.08549
308/308 - 2s - loss: 1.1894 - val_loss: 0.6039 - 2s/epoch - 5ms/step
Epoch 11/20
Epoch 11: val_loss did not improve from 0.08549
308/308 - 1s - loss: 1.1661 - val_loss: 0.5375 - 1s/epoch - 5ms/step
Epoch 12/20
Epoch 12: val_loss did not improve from 0.08549
308/308 - 2s - loss: 1.1541 - val_loss: 0.5575 - 2s/epoch - 5ms/step
Epoch 13/20
Epoch 13: val_loss did not improve from 0.08549
308/308 - 2s - loss: 1.1482 - val_loss: 0.4185 - 2s/epoch - 5ms/step
Epoch 14/20
Epoch 14: val_loss did not improve from 0.08549
308/308 - 2s - loss: 1.1497 - val_loss: 0.4861 - 2s/epoch - 5ms/step
Epoch 15/20
Epoch 15: val_loss did not improve from 0.08549
308/308 - 2s - loss: 1.1565 - val_loss: 0.4202 - 2s/epoch - 5ms/step
Epoch 16/20
Epoch 16: val_loss did not improve from 0.08549
308/308 - 1s - loss: 1.1204 - val_loss: 0.4603 - 1s/epoch - 5ms/step
Epoch 17/20
Epoch 17: val_loss did not improve from 0.08549
308/308 - 2s - loss: 1.1476 - val_loss: 0.7203 - 2s/epoch - 5ms/step
Epoch 18/20
Epoch 18: val_loss did not improve from 0.08549
308/308 - 2s - loss: 1.1163 - val_loss: 0.1781 - 2s/epoch - 5ms/step
Epoch 19/20
Epoch 19: val_loss did not improve from 0.08549
308/308 - 1s - loss: 1.1156 - val_loss: 0.9879 - 1s/epoch - 5ms/step
Epoch 20/20
Epoch 20: val_loss did not improve from 0.08549
308/308 - 2s - loss: 1.1117 - val_loss: 0.4723 - 2s/epoch - 5ms/step
El entrenamiento del modelo 15 se ha completado y el historial ha sido guardado en 'history_at_LSTM21_model_14.joblib'.
Epoch 1/20
Epoch 1: val_loss did not improve from 0.08549
308/308 - 3s - loss: 7.7651 - val_loss: 8.0594 - 3s/epoch - 9ms/step
Epoch 2/20
Epoch 2: val_loss did not improve from 0.08549
308/308 - 2s - loss: 2.1776 - val_loss: 2.3204 - 2s/epoch - 5ms/step
Epoch 3/20
Epoch 3: val_loss did not improve from 0.08549
308/308 - 1s - loss: 1.3476 - val_loss: 1.3050 - 1s/epoch - 5ms/step
Epoch 4/20
Epoch 4: val_loss did not improve from 0.08549
308/308 - 1s - loss: 1.2706 - val_loss: 0.8022 - 1s/epoch - 5ms/step
Epoch 5/20
Epoch 5: val_loss did not improve from 0.08549
308/308 - 1s - loss: 1.2186 - val_loss: 0.8870 - 1s/epoch - 5ms/step
Epoch 6/20
Epoch 6: val_loss did not improve from 0.08549
308/308 - 1s - loss: 1.2117 - val_loss: 0.8281 - 1s/epoch - 5ms/step
Epoch 7/20
Epoch 7: val_loss did not improve from 0.08549
308/308 - 1s - loss: 1.1877 - val_loss: 0.6939 - 1s/epoch - 5ms/step
Epoch 8/20
Epoch 8: val_loss did not improve from 0.08549
308/308 - 1s - loss: 1.1962 - val_loss: 0.7068 - 1s/epoch - 5ms/step
Epoch 9/20
Epoch 9: val_loss did not improve from 0.08549
308/308 - 2s - loss: 1.1734 - val_loss: 0.6576 - 2s/epoch - 5ms/step
Epoch 10/20
Epoch 10: val_loss did not improve from 0.08549
308/308 - 2s - loss: 1.1635 - val_loss: 0.5364 - 2s/epoch - 5ms/step
Epoch 11/20
Epoch 11: val_loss did not improve from 0.08549
308/308 - 1s - loss: 1.1471 - val_loss: 0.6842 - 1s/epoch - 5ms/step
Epoch 12/20
Epoch 12: val_loss did not improve from 0.08549
308/308 - 2s - loss: 1.1384 - val_loss: 0.8738 - 2s/epoch - 5ms/step
Epoch 13/20
Epoch 13: val_loss did not improve from 0.08549
308/308 - 2s - loss: 1.1387 - val_loss: 0.8492 - 2s/epoch - 5ms/step
Epoch 14/20
Epoch 14: val_loss did not improve from 0.08549
308/308 - 1s - loss: 1.1708 - val_loss: 0.6717 - 1s/epoch - 5ms/step
Epoch 15/20
Epoch 15: val_loss did not improve from 0.08549
308/308 - 2s - loss: 1.1358 - val_loss: 0.2832 - 2s/epoch - 5ms/step
Epoch 16/20
Epoch 16: val_loss did not improve from 0.08549
308/308 - 1s - loss: 1.1441 - val_loss: 0.6861 - 1s/epoch - 5ms/step
Epoch 17/20
Epoch 17: val_loss did not improve from 0.08549
308/308 - 1s - loss: 1.1287 - val_loss: 0.5905 - 1s/epoch - 5ms/step
Epoch 18/20
Epoch 18: val_loss did not improve from 0.08549
308/308 - 2s - loss: 1.1309 - val_loss: 0.5648 - 2s/epoch - 5ms/step
Epoch 19/20
Epoch 19: val_loss did not improve from 0.08549
308/308 - 1s - loss: 1.1084 - val_loss: 0.1689 - 1s/epoch - 5ms/step
Epoch 20/20
Epoch 20: val_loss did not improve from 0.08549
308/308 - 2s - loss: 1.0861 - val_loss: 0.3070 - 2s/epoch - 5ms/step
El entrenamiento del modelo 16 se ha completado y el historial ha sido guardado en 'history_at_LSTM21_model_15.joblib'.
En este caso, el parámetro verbose=2 indica que se mostrará una línea por cada época durante el entrenamiento. Los valores posibles son: 0 para no mostrar información, 1 para visualizar una barra de progreso y 2 para ver un registro por época.
Además, las muestras se mezclan de manera aleatoria antes de cada época, gracias a la opción shuffle=True.
Una vez completado el entrenamiento, se realizan predicciones sobre el Precio del Bitcoin utilizando los mejores modelos guardados. Las predicciones generadas, que corresponden al Precio del Bitcoin escalado, son transformadas inversamente para recuperar los valores originales. Asimismo, se evalúa la calidad del ajuste mediante métricas de medición como SSE, MAPE, MAD, MSD y R2.
import os
import re
from tensorflow.keras.models import load_model
import numpy as np

# Directory containing the saved Keras checkpoints.
model_dir = 'keras_models'
files = os.listdir(model_dir)

# Checkpoint filenames encode epoch and validation loss:
#   PRSA_data_at_LSTM21_weights.<epoch>-<val_loss>.keras
pattern = r"PRSA_data_at_LSTM21_weights\.(\d+)-([\d\.]+)\.keras"

best_val_loss = float('inf')
best_model_file = None
best21_lstm_at = None

# Scan the directory and keep the checkpoint with the lowest val_loss.
for file in files:
    match = re.match(pattern, file)
    if match:
        val_loss = float(match.group(2))  # validation loss parsed from the filename
        if val_loss < best_val_loss:
            best_val_loss = val_loss
            best_model_file = file  # remember the current best checkpoint

# Load the best checkpoint, if any file matched the pattern.
if best_model_file:
    best_model_path = os.path.join(model_dir, best_model_file)
    print(f"Cargando el mejor modelo: {best_model_file} con val_loss: {best_val_loss}")
    best21_lstm_at = load_model(best_model_path)  # full model restore (architecture + weights)
    if best21_lstm_at is None:
        print("Error: No se pudo cargar el mejor modelo.")
else:
    print("No se encontraron archivos de modelos que coincidan con el patrón.")
Cargando el mejor modelo: PRSA_data_at_LSTM21_weights.20-0.0855.keras con val_loss: 0.0855
A continuación estimamos las predicciones del modelo LSTM para la época en donde la función de pérdida alcanza su valor más bajo.
# List every checkpoint file found in the models directory (diagnostic cell).
print("Archivos en el directorio 'keras_models':", files)
Archivos en el directorio 'keras_models': ['PRSA_data_vola14_MLP7_weights.04-0.0018.keras', 'PRSA_data_price_MLP21_weights.01-3579.4128.keras', 'PRSA_data_price_LSTM21_weights.01-43062.3047.keras', 'PRSA_data_price_LSTM28_weights.02-42892.2305.keras', 'PRSA_data_price_LSTM_weights.19-67016.3047.keras', 'PRSA_data_vola14_MLP21_weights.20-0.0011.keras', 'PRSA_data_price_LSTM14_weights.06-51394.8750.keras', 'PRSA_data_vola21_LSTM7_weights.04-0.0012.keras', 'PRSA_data_vola7_MLP28_weights.07-0.0033.keras', 'PRSA_data_price_LSTM21_weights.02-43052.0469.keras', 'PRSA_data_at_MLP7_weights.80-0.0780.keras', 'PRSA_data_vola21_LSTM28_weights.02-0.0022.keras', 'PRSA_data_vola21_LSTM14_weights.04-0.0011.keras', 'PRSA_data_vola7_LSTM21_weights.18-0.0029.keras', 'PRSA_data_vola21_LSTM7_weights.08-0.0013.keras', 'PRSA_data_vola28_MLP14_weights.04-0.0013.keras', 'PRSA_data_price_MLP14_weights.52-626.2623.keras', 'PRSA_data_vola7_MLP7_weights.15-0.0041.keras', 'PRSA_data_price_LSTM14_weights.13-51325.8516.keras', 'PRSA_data_vola14_MLP7_weights.02-0.0023.keras', 'PRSA_data_price_LSTM21_weights.11-42962.6562.keras', 'PRSA_data_vola7_LSTM28_weights.10-0.0037.keras', 'PRSA_data_at_LSTM_weights.04-1.2071.keras', 'PRSA_data_price_MLP7_weights.50-1342.5273.keras', 'PRSA_data_price_MLP14_weights.03-5108.7837.keras', 'PRSA_data_vola28_MLP7_weights.14-0.0012.keras', 'PRSA_data_at_MLP7_weights.11-2.4883.keras', 'PRSA_data_price_LSTM28_weights.08-42832.5430.keras', 'PRSA_data_price_MLP28_weights.01-9242.6553.keras', 'PRSA_data_vola21_LSTM28_weights.04-0.0019.keras', 'PRSA_data_price_MLP21_weights.16-1013.2251.keras', 'PRSA_data_price_MLP28_weights.13-1042.5178.keras', 'PRSA_data_vola28_MLP28_weights.34-0.0006.keras', 'PRSA_data_at_MLP7_weights.01-19.6635.keras', 'PRSA_data_at_MLP7_weights.19-1.8379.keras', 'PRSA_data_at_MLP7_weights.12-2.3991.keras', 'PRSA_data_price_MLP7_weights.15-1346.3275.keras', 'PRSA_data_vola14_MLP28_weights.02-0.0024.keras', 
'PRSA_data_at_LSTM21_weights.05-0.7513.keras', 'PRSA_data_vola14_MLP28_weights.07-0.0017.keras', 'PRSA_data_at_MLP28_weights.05-0.0930.keras', 'PRSA_data_price_LSTM14_weights.20-51256.7266.keras', 'PRSA_data_vola21_LSTM21_weights.05-0.0013.keras', 'PRSA_data_price_MLP14_weights.12-1304.7009.keras', 'PRSA_data_vola7_LSTM14_weights.09-0.0044.keras', 'PRSA_data_vola28_MLP28_weights.50-0.0006.keras', 'PRSA_data_vola7_MLP14_weights.15-0.0020.keras', 'PRSA_data_price_MLP14_weights.08-1386.5365.keras', 'PRSA_data_price_LSTM21_weights.09-42982.2383.keras', 'PRSA_data_price_MLP28_weights.11-7366.8994.keras', 'PRSA_data_vola14_MLP7_weights.18-0.0015.keras', 'PRSA_data_price_MLP7_weights.14-1734.7238.keras', 'PRSA_data_price_LSTM14_weights.15-51306.2070.keras', 'PRSA_data_vola21_MLP28_weights.25-0.0010.keras', 'PRSA_data_at_LSTM14_weights.02-2.2175.keras', 'PRSA_data_vola28_LSTM7_weights.08-0.0014.keras', 'PRSA_data_price_MLP28_weights.10-1107.4047.keras', 'PRSA_data_vola28_MLP7_weights.01-0.0068.keras', 'PRSA_data_vola21_LSTM21_weights.03-0.0015.keras', 'PRSA_data_price_MLP7_weights.01-9253.3047.keras', 'PRSA_data_vola21_MLP14_weights.31-0.0007.keras', 'PRSA_data_price_LSTM28_weights.12-42793.6172.keras', 'PRSA_data_price_LSTM14_weights.09-51365.0000.keras', 'PRSA_data_vola28_MLP28_weights.42-0.0006.keras', 'PRSA_data_price_MLP7_weights.05-4792.5054.keras', 'PRSA_data_vola21_LSTM_weights.01-0.0024.keras', 'PRSA_data_price_LSTM28_weights.17-42745.0352.keras', 'PRSA_data_vola28_MLP28_weights.45-0.0006.keras', 'PRSA_data_vola28_MLP14_weights.08-0.0010.keras', 'PRSA_data_at_LSTM_weights.11-0.6498.keras', 'PRSA_data_vola28_MLP28_weights.39-0.0007.keras', 'PRSA_data_at_MLP21_weights.11-0.0326.keras', 'PRSA_data_price_LSTM_weights.20-67006.4141.keras', 'PRSA_data_vola28_MLP7_weights.06-0.0014.keras', 'PRSA_data_at_LSTM14_weights.06-0.8145.keras', 'PRSA_data_at_MLP28_weights.01-2.1902.keras', 'PRSA_data_price_MLP14_weights.46-814.1992.keras', '.DS_Store', 
'PRSA_data_at_LSTM21_weights.01-5.9452.keras', 'PRSA_data_vola28_LSTM28_weights.12-0.0006.keras', 'PRSA_data_price_LSTM14_weights.18-51276.5625.keras', 'PRSA_data_vola21_LSTM14_weights.01-0.0078.keras', 'PRSA_data_vola14_MLP21_weights.04-0.0015.keras', 'PRSA_data_vola21_LSTM7_weights.02-0.0012.keras', 'PRSA_data_vola21_LSTM14_weights.02-0.0011.keras', 'PRSA_data_at_MLP7_weights.09-2.9793.keras', 'PRSA_data_vola21_MLP14_weights.06-0.0007.keras', 'PRSA_data_vola14_MLP21_weights.78-0.0010.keras', 'PRSA_data_price_MLP14_weights.64-711.9107.keras', 'PRSA_data_vola14_MLP28_weights.31-0.0011.keras', 'PRSA_data_vola14_LSTM_weights.08-0.0020.keras', 'PRSA_data_vola14_LSTM21_weights.02-0.0018.keras', 'PRSA_data_at_LSTM21_weights.02-2.0083.keras', 'PRSA_data_vola14_LSTM28_weights.17-0.0014.keras', 'PRSA_data_vola14_MLP14_weights.29-0.0005.keras', 'PRSA_data_price_LSTM_weights.03-67175.7734.keras', 'PRSA_data_vola14_LSTM_weights.01-0.0068.keras', 'PRSA_data_price_LSTM28_weights.11-42803.3086.keras', 'PRSA_data_price_MLP28_weights.15-7107.3306.keras', 'PRSA_data_vola14_MLP21_weights.49-0.0010.keras', 'PRSA_data_vola28_MLP7_weights.07-0.0010.keras', 'PRSA_data_vola21_LSTM7_weights.02-0.0010.keras', 'PRSA_data_at_LSTM14_weights.15-0.4082.keras', 'PRSA_data_vola28_MLP7_weights.19-0.0011.keras', 'PRSA_data_at_LSTM21_weights.19-0.3670.keras', 'PRSA_data_vola14_MLP14_weights.06-0.0006.keras', 'PRSA_data_vola14_LSTM21_weights.06-0.0012.keras', 'PRSA_data_price_LSTM_weights.17-67036.2891.keras', 'PRSA_data_vola28_MLP7_weights.03-0.0018.keras', 'PRSA_data_vola7_MLP28_weights.02-0.0039.keras', 'PRSA_data_vola7_LSTM28_weights.16-0.0035.keras', 'PRSA_data_vola21_MLP7_weights.20-0.0011.keras', 'PRSA_data_vola21_LSTM7_weights.01-0.0018.keras', 'PRSA_data_vola21_MLP21_weights.33-0.0009.keras', 'PRSA_data_price_MLP21_weights.64-870.7020.keras', 'PRSA_data_vola21_MLP14_weights.22-0.0007.keras', 'PRSA_data_vola7_MLP14_weights.14-0.0020.keras', 
'PRSA_data_price_LSTM21_weights.17-42903.9648.keras', 'PRSA_data_vola21_MLP14_weights.01-0.0021.keras', 'PRSA_data_vola21_LSTM7_weights.16-0.0011.keras', 'PRSA_data_at_LSTM21_weights.17-0.5070.keras', 'PRSA_data_at_LSTM14_weights.11-0.6656.keras', 'PRSA_data_vola28_MLP7_weights.02-0.0019.keras', 'PRSA_data_vola14_MLP28_weights.05-0.0018.keras', 'PRSA_data_vola14_LSTM14_weights.08-0.0005.keras', 'PRSA_data_at_MLP21_weights.02-0.8171.keras', 'PRSA_data_price_MLP28_weights.02-8134.2202.keras', 'PRSA_data_price_LSTM28_weights.06-42852.3320.keras', 'PRSA_data_vola7_MLP14_weights.02-0.0036.keras', 'PRSA_data_vola14_LSTM28_weights.01-0.0026.keras', 'PRSA_data_at_MLP7_weights.04-9.8837.keras', 'PRSA_data_price_MLP28_weights.16-1274.4368.keras', 'PRSA_data_price_LSTM21_weights.07-43002.0078.keras', 'PRSA_data_vola7_MLP28_weights.23-0.0032.keras', 'PRSA_data_price_LSTM_weights.01-67196.3359.keras', 'PRSA_data_at_MLP28_weights.02-1.9156.keras', 'PRSA_data_vola14_MLP21_weights.02-0.0016.keras', 'PRSA_data_vola28_MLP14_weights.14-0.0007.keras', 'PRSA_data_price_LSTM28_weights.07-42842.4375.keras', 'PRSA_data_price_MLP14_weights.37-825.1805.keras', 'PRSA_data_vola7_MLP14_weights.07-0.0021.keras', 'PRSA_data_price_LSTM14_weights.04-51414.8945.keras', 'PRSA_data_price_MLP7_weights.42-1446.1339.keras', 'PRSA_data_price_MLP21_weights.57-932.6497.keras', 'PRSA_data_price_LSTM14_weights.19-51266.6133.keras', 'PRSA_data_vola21_MLP14_weights.34-0.0008.keras', 'PRSA_data_price_MLP14_weights.01-10485.8262.keras', 'PRSA_data_vola21_LSTM7_weights.08-0.0012.keras', 'PRSA_data_at_LSTM_weights.02-2.4966.keras', 'PRSA_data_vola21_LSTM28_weights.15-0.0009.keras', 'PRSA_data_price_LSTM_weights.20-67006.3047.keras', 'PRSA_data_price_LSTM_weights.14-67066.1016.keras', 'PRSA_data_price_LSTM21_weights.19-42884.2422.keras', 'PRSA_data_vola28_LSTM21_weights.18-0.0008.keras', 'PRSA_data_vola21_LSTM7_weights.04-0.0013.keras', 'PRSA_data_price_LSTM28_weights.20-42714.5508.keras', 
'PRSA_data_at_LSTM14_weights.13-0.5609.keras', 'PRSA_data_vola28_LSTM28_weights.04-0.0009.keras', 'PRSA_data_vola14_MLP21_weights.01-0.0118.keras', 'PRSA_data_vola7_MLP21_weights.02-0.0032.keras', 'PRSA_data_at_MLP7_weights.74-0.2838.keras', 'PRSA_data_price_MLP28_weights.17-1217.0461.keras', 'PRSA_data_at_MLP21_weights.16-1.1973.keras', 'PRSA_data_vola7_MLP7_weights.02-0.0036.keras', 'PRSA_data_vola21_LSTM7_weights.02-0.0015.keras', 'PRSA_data_vola14_LSTM14_weights.04-0.0006.keras', 'PRSA_data_vola28_LSTM28_weights.18-0.0006.keras', 'PRSA_data_price_MLP21_weights.74-805.9489.keras', 'PRSA_data_vola21_MLP14_weights.02-0.0011.keras', 'PRSA_data_vola28_MLP21_weights.02-0.0012.keras', 'PRSA_data_vola21_LSTM_weights.11-0.0014.keras', 'PRSA_data_vola7_LSTM14_weights.14-0.0036.keras', 'PRSA_data_at_MLP28_weights.03-1.7712.keras', 'PRSA_data_price_LSTM21_weights.20-42874.3555.keras', 'PRSA_data_vola28_LSTM7_weights.20-0.0010.keras', 'PRSA_data_vola28_MLP21_weights.04-0.0009.keras', 'PRSA_data_at_MLP7_weights.08-3.6716.keras', 'PRSA_data_at_MLP21_weights.04-0.0734.keras', 'PRSA_data_vola21_LSTM28_weights.03-0.0021.keras', 'PRSA_data_vola21_MLP14_weights.35-0.0008.keras', 'PRSA_data_vola28_LSTM7_weights.11-0.0012.keras', 'PRSA_data_price_LSTM28_weights.04-42872.1953.keras', 'PRSA_data_at_LSTM_weights.08-0.4803.keras', 'PRSA_data_vola28_MLP14_weights.05-0.0010.keras', 'PRSA_data_price_LSTM14_weights.14-51316.0625.keras', 'PRSA_data_vola7_MLP14_weights.01-0.0101.keras', 'PRSA_data_vola14_LSTM28_weights.17-0.0015.keras', 'PRSA_data_vola7_LSTM_weights.01-0.0040.keras', 'PRSA_data_vola14_MLP21_weights.09-0.0011.keras', 'PRSA_data_price_LSTM21_weights.05-43021.9570.keras', 'PRSA_data_vola28_MLP28_weights.03-0.0007.keras', 'PRSA_data_vola7_LSTM14_weights.10-0.0042.keras', 'PRSA_data_price_LSTM_weights.07-67135.3359.keras', 'PRSA_data_vola7_MLP14_weights.18-0.0020.keras', 'PRSA_data_price_LSTM21_weights.18-42894.1055.keras', 'PRSA_data_vola7_MLP7_weights.01-0.0042.keras', 
'PRSA_data_vola14_LSTM_weights.05-0.0024.keras', 'PRSA_data_vola21_MLP14_weights.27-0.0008.keras', 'PRSA_data_vola28_MLP28_weights.02-0.0018.keras', 'PRSA_data_vola21_LSTM7_weights.03-0.0017.keras', 'PRSA_data_price_LSTM_weights.10-67105.4297.keras', 'PRSA_data_vola28_MLP7_weights.19-0.0010.keras', 'PRSA_data_vola7_MLP21_weights.03-0.0030.keras', 'PRSA_data_at_MLP7_weights.06-5.3724.keras', 'PRSA_data_at_LSTM_weights.05-1.1226.keras', 'PRSA_data_vola21_LSTM7_weights.01-0.0026.keras', 'PRSA_data_vola21_LSTM28_weights.05-0.0018.keras', 'PRSA_data_vola14_MLP21_weights.05-0.0012.keras', 'PRSA_data_vola21_LSTM28_weights.14-0.0010.keras', 'PRSA_data_vola21_MLP14_weights.08-0.0010.keras', 'PRSA_data_vola21_MLP28_weights.47-0.0008.keras', 'PRSA_data_price_LSTM14_weights.16-51296.3633.keras', 'PRSA_data_at_MLP7_weights.02-16.3421.keras', 'PRSA_data_vola7_LSTM21_weights.15-0.0030.keras', 'PRSA_data_vola21_MLP21_weights.09-0.0009.keras', 'PRSA_data_at_LSTM14_weights.01-6.2168.keras', 'PRSA_data_vola28_LSTM21_weights.01-0.0016.keras', 'PRSA_data_vola21_LSTM21_weights.16-0.0009.keras', 'PRSA_data_vola21_LSTM28_weights.07-0.0016.keras', 'PRSA_data_vola21_MLP28_weights.19-0.0010.keras', 'PRSA_data_vola21_LSTM28_weights.14-0.0009.keras', 'PRSA_data_vola14_MLP7_weights.09-0.0018.keras', 'PRSA_data_vola21_MLP21_weights.02-0.0011.keras', 'PRSA_data_vola28_LSTM14_weights.03-0.0007.keras', 'PRSA_data_vola21_LSTM_weights.09-0.0015.keras', 'PRSA_data_vola21_LSTM7_weights.09-0.0012.keras', 'PRSA_data_vola28_MLP21_weights.65-0.0007.keras', 'PRSA_data_vola21_MLP28_weights.15-0.0011.keras', 'PRSA_data_at_MLP7_weights.42-0.8694.keras', 'PRSA_data_vola21_LSTM28_weights.18-0.0008.keras', 'PRSA_data_vola21_MLP14_weights.04-0.0008.keras', 'PRSA_data_vola7_LSTM14_weights.02-0.0040.keras', 'PRSA_data_vola21_LSTM14_weights.12-0.0007.keras', 'PRSA_data_vola7_LSTM21_weights.15-0.0029.keras', 'PRSA_data_vola21_MLP28_weights.01-0.0026.keras', 'PRSA_data_price_LSTM_weights.13-67075.9453.keras', 
'PRSA_data_vola14_MLP7_weights.01-0.0041.keras', 'PRSA_data_price_MLP21_weights.02-1476.1034.keras', 'PRSA_data_vola14_LSTM28_weights.19-0.0014.keras', 'PRSA_data_price_MLP14_weights.96-663.0834.keras', 'PRSA_data_vola7_MLP21_weights.94-0.0024.keras', 'PRSA_data_price_LSTM21_weights.03-43041.9570.keras', 'PRSA_data_price_MLP7_weights.12-3798.9531.keras', 'PRSA_data_price_LSTM28_weights.10-42813.0352.keras', 'PRSA_data_vola14_MLP28_weights.20-0.0014.keras', 'PRSA_data_vola7_MLP21_weights.04-0.0027.keras', 'PRSA_data_vola7_LSTM28_weights.18-0.0035.keras', 'PRSA_data_at_MLP7_weights.10-2.7632.keras', 'PRSA_data_at_LSTM21_weights.18-0.3561.keras', 'PRSA_data_price_LSTM_weights.06-67145.4141.keras', 'PRSA_data_price_LSTM28_weights.13-42783.9805.keras', 'PRSA_data_vola21_MLP7_weights.13-0.0012.keras', 'PRSA_data_price_LSTM28_weights.01-42902.4570.keras', 'PRSA_data_price_LSTM_weights.20-67005.9609.keras', 'PRSA_data_vola7_LSTM21_weights.04-0.0034.keras', 'PRSA_data_at_MLP14_weights.03-0.0461.keras', 'PRSA_data_at_LSTM21_weights.07-0.5113.keras', 'PRSA_data_vola28_MLP28_weights.01-0.0042.keras', 'PRSA_data_vola7_LSTM14_weights.01-0.0455.keras', 'PRSA_data_price_MLP28_weights.17-1182.9105.keras', 'PRSA_data_vola7_MLP14_weights.05-0.0018.keras', 'PRSA_data_vola28_MLP14_weights.38-0.0006.keras', 'PRSA_data_vola28_MLP7_weights.10-0.0013.keras', 'PRSA_data_vola7_LSTM21_weights.01-0.0038.keras', 'PRSA_data_price_LSTM14_weights.17-51286.4570.keras', 'PRSA_data_price_LSTM28_weights.16-42754.8008.keras', 'PRSA_data_price_LSTM14_weights.11-51345.4023.keras', 'PRSA_data_price_MLP14_weights.22-638.4409.keras', 'PRSA_data_vola7_MLP28_weights.32-0.0029.keras', 'PRSA_data_price_MLP7_weights.48-1648.1864.keras', 'PRSA_data_vola28_LSTM28_weights.10-0.0007.keras', 'PRSA_data_vola21_LSTM_weights.03-0.0021.keras', 'PRSA_data_vola21_LSTM21_weights.13-0.0010.keras', 'PRSA_data_at_MLP7_weights.07-4.2627.keras', 'PRSA_data_vola7_LSTM21_weights.17-0.0029.keras', 
'PRSA_data_vola21_LSTM28_weights.08-0.0011.keras', 'PRSA_data_price_MLP21_weights.05-1072.5081.keras', 'PRSA_data_at_LSTM21_weights.06-0.6219.keras', 'PRSA_data_vola7_LSTM21_weights.05-0.0030.keras', 'PRSA_data_price_MLP14_weights.29-856.8320.keras', 'PRSA_data_vola14_LSTM21_weights.11-0.0011.keras', 'PRSA_data_price_LSTM21_weights.10-42972.4219.keras', 'PRSA_data_at_LSTM21_weights.19-0.2744.keras', 'PRSA_data_price_LSTM_weights.02-67185.9453.keras', 'PRSA_data_vola28_MLP14_weights.21-0.0007.keras', 'PRSA_data_vola21_LSTM28_weights.16-0.0009.keras', 'PRSA_data_vola28_LSTM21_weights.10-0.0009.keras', 'PRSA_data_price_LSTM28_weights.15-42764.5898.keras', 'PRSA_data_at_LSTM14_weights.07-0.7059.keras', 'PRSA_data_vola21_LSTM21_weights.01-0.0048.keras', 'PRSA_data_at_LSTM14_weights.03-1.3422.keras', 'PRSA_data_vola14_LSTM14_weights.02-0.0015.keras', 'PRSA_data_vola14_MLP14_weights.02-0.0007.keras', 'PRSA_data_vola7_LSTM21_weights.02-0.0034.keras', 'PRSA_data_vola7_MLP21_weights.21-0.0025.keras', 'PRSA_data_at_MLP7_weights.18-1.8396.keras', 'PRSA_data_price_LSTM_weights.11-67095.6172.keras', 'PRSA_data_at_LSTM14_weights.12-0.6378.keras', 'PRSA_data_vola21_LSTM21_weights.07-0.0012.keras', 'PRSA_data_vola7_MLP28_weights.45-0.0030.keras', 'PRSA_data_at_MLP7_weights.26-0.8897.keras', 'PRSA_data_vola21_MLP7_weights.01-0.0014.keras', 'PRSA_data_price_MLP14_weights.19-982.5739.keras', 'PRSA_data_vola14_LSTM_weights.16-0.0017.keras', 'PRSA_data_price_MLP21_weights.94-813.8954.keras', 'PRSA_data_vola7_LSTM21_weights.20-0.0029.keras', 'PRSA_data_at_LSTM_weights.03-1.4842.keras', 'PRSA_data_vola21_MLP28_weights.02-0.0015.keras', 'PRSA_data_price_LSTM21_weights.12-42952.9453.keras', 'PRSA_data_price_MLP28_weights.17-1019.2816.keras', 'PRSA_data_vola14_LSTM_weights.13-0.0019.keras', 'PRSA_data_vola7_LSTM_weights.02-0.0038.keras', 'PRSA_data_vola7_LSTM28_weights.02-0.0042.keras', 'PRSA_data_at_LSTM14_weights.13-0.2270.keras', 'PRSA_data_price_LSTM_weights.18-67026.2891.keras', 
'PRSA_data_vola28_MLP21_weights.66-0.0007.keras', 'PRSA_data_at_MLP14_weights.42-0.0373.keras', 'PRSA_data_vola7_MLP14_weights.38-0.0020.keras', 'PRSA_data_vola7_LSTM21_weights.11-0.0029.keras', 'PRSA_data_vola21_LSTM21_weights.12-0.0010.keras', 'PRSA_data_at_LSTM_weights.14-0.5158.keras', 'PRSA_data_vola14_LSTM21_weights.01-0.0019.keras', 'PRSA_data_vola28_LSTM21_weights.11-0.0009.keras', 'PRSA_data_vola7_LSTM28_weights.05-0.0038.keras', 'PRSA_data_vola14_MLP14_weights.10-0.0005.keras', 'PRSA_data_at_MLP21_weights.03-2.1540.keras', 'PRSA_data_at_LSTM_weights.16-0.5377.keras', 'PRSA_data_vola21_LSTM7_weights.13-0.0013.keras', 'PRSA_data_vola21_MLP28_weights.44-0.0008.keras', 'PRSA_data_at_MLP7_weights.16-1.9941.keras', 'PRSA_data_vola28_LSTM28_weights.01-0.0010.keras', 'PRSA_data_vola28_LSTM14_weights.19-0.0006.keras', 'PRSA_data_price_LSTM21_weights.14-42933.4180.keras', 'PRSA_data_vola14_LSTM28_weights.04-0.0019.keras', 'PRSA_data_price_MLP21_weights.82-828.7976.keras', 'PRSA_data_vola28_MLP21_weights.01-0.0030.keras', 'PRSA_data_price_LSTM14_weights.10-51355.2070.keras', 'PRSA_data_vola21_MLP21_weights.18-0.0010.keras', 'PRSA_data_vola28_MLP21_weights.73-0.0007.keras', 'PRSA_data_vola28_MLP7_weights.20-0.0011.keras', 'PRSA_data_price_LSTM21_weights.13-42943.1719.keras', 'PRSA_data_vola28_MLP28_weights.36-0.0007.keras', 'PRSA_data_vola7_MLP14_weights.04-0.0025.keras', 'PRSA_data_at_MLP14_weights.02-0.2715.keras', 'PRSA_data_price_LSTM14_weights.05-51404.8945.keras', 'PRSA_data_price_LSTM28_weights.19-42725.4297.keras', 'PRSA_data_price_MLP28_weights.01-1561.6656.keras', 'PRSA_data_vola21_MLP7_weights.14-0.0011.keras', 'PRSA_data_vola7_MLP7_weights.23-0.0033.keras', 'PRSA_data_vola14_MLP28_weights.01-0.0026.keras', 'PRSA_data_at_MLP14_weights.29-0.0323.keras', 'PRSA_data_vola7_MLP21_weights.01-0.0033.keras', 'PRSA_data_vola21_LSTM_weights.19-0.0014.keras', 'PRSA_data_price_MLP7_weights.03-5953.8076.keras', 'PRSA_data_price_MLP21_weights.42-948.6628.keras', 
'PRSA_data_vola21_LSTM14_weights.07-0.0008.keras', 'PRSA_data_vola14_LSTM28_weights.05-0.0018.keras', 'PRSA_data_vola7_LSTM28_weights.13-0.0037.keras', 'PRSA_data_vola21_MLP21_weights.01-0.0016.keras', 'PRSA_data_vola14_LSTM21_weights.08-0.0011.keras', 'PRSA_data_price_LSTM21_weights.06-43011.9570.keras', 'PRSA_data_vola7_MLP14_weights.05-0.0019.keras', 'PRSA_data_vola7_MLP28_weights.16-0.0033.keras', 'PRSA_data_vola21_LSTM28_weights.16-0.0008.keras', 'PRSA_data_vola28_MLP14_weights.01-0.0031.keras', 'PRSA_data_vola14_MLP28_weights.11-0.0015.keras', 'PRSA_data_price_LSTM14_weights.03-51425.0000.keras', 'PRSA_data_vola7_LSTM28_weights.04-0.0039.keras', 'PRSA_data_vola28_LSTM7_weights.13-0.0011.keras', 'PRSA_data_price_LSTM14_weights.01-51445.4883.keras', 'PRSA_data_vola28_MLP28_weights.22-0.0006.keras', 'PRSA_data_price_LSTM_weights.09-67115.3359.keras', 'PRSA_data_vola28_LSTM7_weights.14-0.0011.keras', 'PRSA_data_vola7_LSTM14_weights.04-0.0050.keras', 'PRSA_data_vola21_MLP28_weights.17-0.0010.keras', 'PRSA_data_vola28_LSTM21_weights.09-0.0010.keras', 'PRSA_data_vola21_MLP14_weights.13-0.0010.keras', 'PRSA_data_vola14_MLP14_weights.01-0.0013.keras', 'PRSA_data_vola21_MLP28_weights.50-0.0009.keras', 'PRSA_data_at_LSTM_weights.19-0.2184.keras', 'PRSA_data_vola14_LSTM_weights.02-0.0030.keras', 'PRSA_data_vola7_MLP28_weights.39-0.0030.keras', 'PRSA_data_price_LSTM21_weights.04-43031.9414.keras', 'PRSA_data_price_LSTM_weights.15-67056.2109.keras', 'PRSA_data_at_MLP7_weights.20-1.6814.keras', 'PRSA_data_vola28_LSTM7_weights.18-0.0010.keras', 'PRSA_data_at_MLP7_weights.95-0.2130.keras', 'PRSA_data_vola14_LSTM28_weights.03-0.0021.keras', 'PRSA_data_at_LSTM_weights.01-7.5653.keras', 'PRSA_data_at_MLP7_weights.05-7.2110.keras', 'PRSA_data_vola21_LSTM28_weights.20-0.0008.keras', 'PRSA_data_at_LSTM_weights.07-0.8569.keras', 'PRSA_data_vola14_LSTM28_weights.12-0.0014.keras', 'PRSA_data_price_LSTM21_weights.16-42913.7969.keras', 'PRSA_data_at_MLP28_weights.04-1.3772.keras', 
'PRSA_data_price_LSTM_weights.08-67125.3047.keras', 'PRSA_data_price_LSTM14_weights.07-51384.8633.keras', 'PRSA_data_price_LSTM_weights.04-67165.6016.keras', 'PRSA_data_vola14_LSTM28_weights.15-0.0014.keras', 'PRSA_data_at_MLP14_weights.01-0.4195.keras', 'PRSA_data_price_LSTM28_weights.09-42822.7578.keras', 'PRSA_data_price_LSTM28_weights.03-42882.1953.keras', 'PRSA_data_at_LSTM14_weights.05-0.8408.keras', 'PRSA_data_vola7_MLP28_weights.08-0.0030.keras', 'PRSA_data_price_MLP14_weights.07-3004.8474.keras', 'PRSA_data_price_LSTM21_weights.15-42923.6367.keras', 'PRSA_data_vola7_MLP21_weights.81-0.0025.keras', 'PRSA_data_price_MLP21_weights.86-819.7106.keras', 'PRSA_data_vola28_MLP7_weights.10-0.0010.keras', 'PRSA_data_vola21_MLP28_weights.05-0.0012.keras', 'PRSA_data_vola21_MLP14_weights.20-0.0007.keras', 'PRSA_data_vola14_MLP28_weights.42-0.0011.keras', 'PRSA_data_price_LSTM_weights.12-67085.7578.keras', 'PRSA_data_vola28_MLP21_weights.13-0.0008.keras', 'PRSA_data_vola14_MLP28_weights.08-0.0016.keras', 'PRSA_data_at_MLP7_weights.86-0.0883.keras', 'PRSA_data_vola28_LSTM14_weights.01-0.0020.keras', 'PRSA_data_vola21_MLP21_weights.71-0.0010.keras', 'PRSA_data_at_MLP28_weights.09-0.1537.keras', 'PRSA_data_vola7_MLP28_weights.48-0.0030.keras', 'PRSA_data_at_MLP28_weights.12-0.0354.keras', 'PRSA_data_at_LSTM14_weights.04-0.9732.keras', 'PRSA_data_at_LSTM14_weights.15-0.2154.keras', 'PRSA_data_vola14_MLP28_weights.21-0.0013.keras', 'PRSA_data_vola21_MLP14_weights.25-0.0009.keras', 'PRSA_data_vola14_LSTM14_weights.01-0.0027.keras', 'PRSA_data_at_LSTM21_weights.20-0.0855.keras', 'PRSA_data_vola21_LSTM28_weights.01-0.0039.keras', 'PRSA_data_price_MLP14_weights.02-8059.2510.keras', 'PRSA_data_vola21_MLP7_weights.13-0.0013.keras', 'PRSA_data_vola28_MLP7_weights.04-0.0016.keras', 'PRSA_data_vola21_LSTM21_weights.02-0.0043.keras', 'PRSA_data_price_MLP14_weights.78-683.9230.keras', 'PRSA_data_price_MLP21_weights.19-1002.5925.keras', 'PRSA_data_vola28_MLP21_weights.92-0.0007.keras', 
'PRSA_data_price_LSTM28_weights.14-42774.2500.keras', 'PRSA_data_vola21_MLP21_weights.06-0.0010.keras', 'PRSA_data_vola14_LSTM14_weights.03-0.0010.keras', 'PRSA_data_price_LSTM_weights.16-67046.2422.keras', 'PRSA_data_vola7_LSTM28_weights.07-0.0037.keras', 'PRSA_data_vola28_LSTM7_weights.01-0.0017.keras', 'PRSA_data_vola28_LSTM14_weights.02-0.0008.keras', 'PRSA_data_vola7_MLP14_weights.05-0.0022.keras', 'PRSA_data_at_MLP21_weights.03-0.2039.keras', 'PRSA_data_vola14_LSTM28_weights.02-0.0023.keras', 'PRSA_data_at_MLP7_weights.03-13.2212.keras', 'PRSA_data_vola7_LSTM14_weights.04-0.0037.keras', 'PRSA_data_vola14_MLP21_weights.69-0.0010.keras', 'PRSA_data_vola21_LSTM7_weights.18-0.0013.keras', 'PRSA_data_vola7_MLP21_weights.49-0.0025.keras', 'PRSA_data_vola28_MLP28_weights.31-0.0006.keras', 'PRSA_data_price_MLP21_weights.77-841.5739.keras', 'PRSA_data_vola28_MLP14_weights.35-0.0006.keras', 'PRSA_data_vola7_MLP28_weights.26-0.0031.keras', 'PRSA_data_vola7_LSTM14_weights.01-0.0065.keras', 'PRSA_data_price_LSTM14_weights.02-51435.1133.keras', 'PRSA_data_price_LSTM28_weights.20-42714.7695.keras', 'PRSA_data_vola28_LSTM28_weights.16-0.0006.keras', 'PRSA_data_vola14_LSTM28_weights.06-0.0016.keras', 'PRSA_data_vola21_MLP21_weights.13-0.0010.keras', 'PRSA_data_vola28_MLP14_weights.32-0.0006.keras', 'PRSA_data_vola7_MLP28_weights.01-0.0043.keras', 'PRSA_data_vola21_MLP28_weights.08-0.0011.keras', 'PRSA_data_vola21_LSTM28_weights.10-0.0011.keras', 'PRSA_data_vola21_LSTM7_weights.14-0.0012.keras', 'PRSA_data_vola7_LSTM28_weights.01-0.0049.keras', 'PRSA_data_price_LSTM28_weights.20-42715.6250.keras', 'PRSA_data_vola21_LSTM7_weights.13-0.0012.keras', 'PRSA_data_price_LSTM_weights.05-67155.5078.keras', 'PRSA_data_price_MLP14_weights.14-1073.6493.keras', 'PRSA_data_vola14_LSTM_weights.03-0.0029.keras', 'PRSA_data_vola14_MLP7_weights.07-0.0021.keras', 'PRSA_data_at_MLP28_weights.03-0.0637.keras', 'PRSA_data_vola21_MLP28_weights.03-0.0012.keras', 
'PRSA_data_at_MLP28_weights.05-0.1823.keras', 'PRSA_data_at_MLP21_weights.01-0.9762.keras', 'PRSA_data_vola7_LSTM28_weights.15-0.0035.keras', 'PRSA_data_price_LSTM21_weights.08-42992.0703.keras', 'PRSA_data_vola21_MLP21_weights.22-0.0010.keras', 'PRSA_data_vola28_LSTM28_weights.20-0.0006.keras', 'PRSA_data_at_MLP21_weights.01-4.3241.keras', 'PRSA_data_at_LSTM_weights.06-0.8671.keras', 'PRSA_data_price_MLP7_weights.37-1654.4709.keras', 'PRSA_data_at_LSTM_weights.20-0.4626.keras', 'PRSA_data_vola21_MLP28_weights.27-0.0009.keras', 'PRSA_data_at_LSTM_weights.19-0.4729.keras', 'PRSA_data_at_MLP7_weights.24-1.4718.keras', 'PRSA_data_at_MLP7_weights.59-0.4636.keras', 'PRSA_data_vola14_LSTM14_weights.06-0.0005.keras', 'PRSA_data_at_MLP7_weights.14-2.2322.keras', 'PRSA_data_vola7_LSTM14_weights.02-0.0196.keras', 'PRSA_data_at_LSTM14_weights.19-0.3356.keras', 'PRSA_data_price_LSTM28_weights.18-42735.2305.keras', 'PRSA_data_at_LSTM21_weights.04-0.8083.keras', 'PRSA_data_price_LSTM14_weights.12-51335.6523.keras', 'PRSA_data_vola21_LSTM_weights.06-0.0017.keras', 'PRSA_data_price_LSTM28_weights.05-42862.2383.keras', 'PRSA_data_price_MLP7_weights.10-1248.9592.keras', 'PRSA_data_vola21_MLP28_weights.35-0.0009.keras', 'PRSA_data_vola28_LSTM21_weights.03-0.0013.keras', 'PRSA_data_at_LSTM21_weights.03-1.0323.keras', 'PRSA_data_price_MLP14_weights.04-5028.7607.keras', 'PRSA_data_price_LSTM14_weights.08-51374.8945.keras', 'PRSA_data_vola28_MLP14_weights.48-0.0007.keras']
# Predictions with the best loaded model (skipped if loading failed above).
if best21_lstm_at is not None:
    # Predict on the scaled train / validation / test input windows.
    train_preds_at_LSTM21 = best21_lstm_at.predict(X_train_at_lstm_21)
    val_preds_at_LSTM21 = best21_lstm_at.predict(X_val_at_lstm_21)
    test_preds_at_LSTM21 = best21_lstm_at.predict(X_test_at_lstm_21)

    # Drop singleton dimensions so downstream metrics receive 1-D arrays.
    train_preds_at_LSTM21 = np.squeeze(train_preds_at_LSTM21)
    val_preds_at_LSTM21 = np.squeeze(val_preds_at_LSTM21)
    test_preds_at_LSTM21 = np.squeeze(test_preds_at_LSTM21)

    # Show the raw prediction arrays.
    print("Predicciones de Entrenamiento:", train_preds_at_LSTM21)
    print("Predicciones de validación:", val_preds_at_LSTM21)
    print("Predicciones de prueba:", test_preds_at_LSTM21)
else:
    print("No se puede realizar predicciones porque el mejor modelo no se ha cargado.")
154/154 [==============================] - 1s 2ms/step
1/1 [==============================] - 0s 9ms/step
1/1 [==============================] - 0s 9ms/step
Predicciones de Entrenamiento: [-0.14549696 -0.14549696 -0.14549696 ... 23.151617 23.151577
23.151493 ]
Predicciones de validación: [23.15135 23.150986 23.150597 23.15023 23.149775 23.149418 23.149054
23.14864 23.148277 23.147985 23.147743 23.147642 23.147684 23.147736
23.147783 23.147837 23.147953 23.148233 23.148668 23.149273 23.149973]
Predicciones de prueba: [23.150743 23.151648 23.1525 23.153406 23.15428 23.15509 23.155857
23.156622 23.15735 23.1581 23.15883 23.159513 23.16015 23.160795
23.161413 23.16208 23.162794 23.163641 23.164438 23.16522 23.16596 ]
# Plot actual vs predicted series; title fixed: LSTM is "Memoria a Corto y Largo Plazo" (not "Paso").
plot_model(data_train_plot_at21[-100:], data_val_plot_at21, data_test_plot_at21, val_preds_at_LSTM21, test_preds_at_LSTM21, "Predicciones usando Memoria a Corto y Largo Plazo (LSTM)")
A continuación realizamos un análisis sobre los residuales y el rendimiento de nuestro modelo con relación a nuestro conjunto de entrenamiento (rendimiento) y de prueba (rendimiento y residuales).
Conjunto de datos de Entrenamiento
A continuación se realiza el análisis de los residuales para el conjunto de entrenamiento.
# Residual diagnostics on the training fit: Ljung-Box (autocorrelation) and Jarque-Bera (normality) p-values.
ljung_box_pval_LSTM_train21_at, jarque_bera_pval_LSTM_train21_at = diagnostic_plots(y_train_at21, train_preds_at_LSTM21)
Ljung-Box LB Statistic: 3668.581655
Ljung-Box p-value: 0.000000
Se rechaza H0: hay autocorrelación en los residuales.
Jarque-Bera p-value: 0.000000
Se rechaza H0: los residuales no siguen una distribución normal.
# Compute performance metrics for the LSTM training set (tau = 21) and
# attach the residual-diagnostic p-values as extra columns.
metrica_at_LSTM_train21 = metricas(y_train_at21, train_preds_at_LSTM21)
metrica_at_LSTM_train21 = metrica_at_LSTM_train21.rename(
    index={0: 'LSTM Entrenamiento Retorno Acumulado τ = 21'})
metrica_at_LSTM_train21['Ljung-Box p-value'] = ljung_box_pval_LSTM_train21_at
metrica_at_LSTM_train21['Jarque-Bera p-value'] = jarque_bera_pval_LSTM_train21_at
metrica_at_LSTM_train21
| SSE | MAPE | MAD | MSD | R2 | Ljung-Box p-value | Jarque-Bera p-value | |
|---|---|---|---|---|---|---|---|
| LSTM Entrenamiento Retorno Acumulado τ = 21 | 258.0266 | 1.25% | 0.17 | 0.05 | 99.86% | 0.0 | 0.0 |
Conjunto de datos de Prueba:
# Residual diagnostics on the test set; returns the Ljung-Box and
# Jarque-Bera p-values.
ljung_box_pvalLSTM21_at, jarque_bera_pvalLSTM21_at = evaluate_residuals(data_test_plot_at21, test_preds_at_LSTM21)
Se rechaza H0: hay autocorrelación en los residuales.
No se rechaza H0: los residuales siguen una distribución normal.
# Compute performance metrics for the LSTM test set (tau = 21) and
# attach the residual-diagnostic p-values as extra columns.
metrica_LSTM_test21_at = metricas(y_test_at21, test_preds_at_LSTM21)
metrica_LSTM_test21_at = metrica_LSTM_test21_at.rename(
    index={0: 'LSTM Prueba Retorno Acumulado τ = 21'})
metrica_LSTM_test21_at['Ljung-Box p-value'] = ljung_box_pvalLSTM21_at
metrica_LSTM_test21_at['Jarque-Bera p-value'] = jarque_bera_pvalLSTM21_at
metrica_LSTM_test21_at
| SSE | MAPE | MAD | MSD | R2 | Ljung-Box p-value | Jarque-Bera p-value | |
|---|---|---|---|---|---|---|---|
| LSTM Prueba Retorno Acumulado τ = 21 | 2.1934 | 1.33% | 0.31 | 0.1 | -1359.33% | 0.0162 | 0.8636 |
Curva Runs vs Error/Score :
# Runs vs error/score curve from the tau = 21 LSTM training history.
plot_best_model_validation_loss(history_at_LSTM21)
Boxplot Errores Conjunto de Entrenamiento, Validación y Prueba :
# Boxplots of the prediction errors for the training, validation and test sets.
errores_plots(y_train_at21, train_preds_at_LSTM21, y_val_at21, val_preds_at_LSTM21, y_test_at21, test_preds_at_LSTM21)
De acuerdo a los resultados obtenidos del gráfico de Run vs Error/Score y al compararlo con el gráfico boxplot de los residuales se observa lo siguiente:
Conjunto de Entrenamiento: La media de los residuales se encuentra alejada considerablemente del error/score correspondiente al val_loss de la época del mejor modelo, además muestra un sesgo considerable a la derecha y una alta variabilidad.
Conjunto de Validación: La media de los residuales se encuentra cercana del error/score correspondiente al val_loss de la época del mejor modelo.
Conjunto de Prueba: La media de los residuales se encuentra cercana del error/score correspondiente al val_loss de la época del mejor modelo.
Lo anterior es coherente con las métricas obtenidas para el modelo implementado.
Horizonte de 28 días (\(\tau=28\))#
Indexación de parámetros para cambio de dimensión de X de 2D a 3D.
# Reshape the 2D feature matrices to 3D (samples, timesteps, features) as
# required by the LSTM layers; resulting shapes are printed below.
X_train_at_lstm_28, X_val_at_lstm_28, X_test_at_lstm_28 = change_dimension_lstm(X_train_at28, X_val_at28, X_test_at28)
Shape of 3D arrays X: (4887, 28, 1) (28, 28, 1) (28, 28, 1)
A continuación, se lleva a cabo la búsqueda de los hiperparámetros que optimizan nuestro modelo utilizando Keras y GridSearch.
from sklearn.metrics import make_scorer, mean_absolute_error
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'  # Suppress TensorFlow info/warning logs.

# Define the LSTM network architecture
def create_lstm_model(optimizer, activation):
    """Build and compile a two-layer LSTM regressor for (28, 1) input sequences.

    Args:
        optimizer: Keras optimizer name (e.g. 'SGD') used when compiling.
        activation: activation function applied inside both LSTM layers.

    Returns:
        A compiled Keras Model mapping a (28, 1) sequence to a single value
        (MAE loss, linear output for regression).
    """
    input_layer_lstm = Input(shape=(28, 1), dtype='float32')
    # FIX: `activation` was accepted but never used, so the grid search over
    # activations had no effect on the model. Apply it to both LSTM layers.
    lstm_layer1 = LSTM(64, activation=activation, return_sequences=True)(input_layer_lstm)
    lstm_layer2 = LSTM(32, activation=activation, return_sequences=False)(lstm_layer1)
    dropout_layer_lstm = Dropout(0.2)(lstm_layer2)
    output_layer_lstm = Dense(1, activation='linear')(dropout_layer_lstm)
    ts_model_lstm = Model(inputs=input_layer_lstm, outputs=output_layer_lstm)
    ts_model_lstm.compile(loss='mean_absolute_error', optimizer=optimizer)
    return ts_model_lstm

# Hyperparameter grid (commented values show the full search space).
param_grid = {'activation': ['relu'],  # ['relu', 'tanh', 'sigmoid']
              'epochs': [20],          # [20, 50, 100, 150]
              'optimizer': ['SGD']     # ['SGD', 'RMSprop', 'Adam']
              }
# Grid-search configuration.
# FIX: MAE is an error (lower is better); without greater_is_better=False,
# GridSearchCV would MAXIMIZE it and select the worst candidate.
scoring = make_scorer(mean_absolute_error, greater_is_better=False)
model = KerasRegressor(build_fn=create_lstm_model, epochs=10, batch_size=16, verbose=0)
grid = GridSearchCV(estimator=model, param_grid=param_grid, cv=KFold(n_splits=5, shuffle=True), scoring=scoring, n_jobs=-1, verbose=2)
grid_result = grid.fit(X_train_at_lstm_28, y_train_at28)
# Grid-search results (best_score_ is now the negated MAE).
print(f"Mejor función de activación: {grid_result.best_params_['activation']}")
print(f"Mejor número de epocas: {grid_result.best_params_['epochs']}")
print(f"Mejor Longitud de paso μ (optimizer): {grid_result.best_params_['optimizer']}")
print(f"Mejor Puntuación: {grid_result.best_score_}")
Fitting 5 folds for each of 1 candidates, totalling 5 fits
[CV] END ..........activation=relu, epochs=20, optimizer=SGD; total time= 44.2s
[CV] END ..........activation=relu, epochs=20, optimizer=SGD; total time= 44.3s
[CV] END ..........activation=relu, epochs=20, optimizer=SGD; total time= 44.4s
[CV] END ..........activation=relu, epochs=20, optimizer=SGD; total time= 44.5s
[CV] END ..........activation=relu, epochs=20, optimizer=SGD; total time= 44.6s
Mejor función de activación: relu
Mejor número de epocas: 20
Mejor Longitud de paso μ (optimizer): SGD
Mejor Puntuación: 0.6302218685093333
El modelo de predicción de series temporales se basa en una red neuronal recurrente que incluye una capa de entrada que se conecta a una capa LSTM. Esta capa LSTM considera 28 pasos temporales, equivalentes al número de datos históricos. La salida de la LSTM se genera únicamente a partir del último paso temporal. Cada uno de los n pasos temporales definidos en el shape (n, 1) cuenta con 32 neuronas ocultas.
La cantidad de pasos temporales (timesteps) y 1 indica el número de características (features) por cada paso temporal. Se utiliza return_sequences=True cuando es necesario enviar la secuencia completa de salidas a la capa siguiente, que puede ser otra capa recurrente, para este caso otra capa LSTM.
La salida de la LSTM pasa a una capa de exclusión (dropout) que elimina aleatoriamente una fracción de la entrada (20%, 40%, 60% u 80%, según el modelo) antes de pasar a la capa de salida, que tiene una única neurona oculta con una función de activación lineal.
Indexación de parámetros de la función build_models_lstm con base en lo indicado en el numeral 3 del parcial práctico.
# Parameter grid for build_models_lstm (per item 3 of the practical exam).
input_shape28 = 28
neurons_list = [10, 100, 1000, 10000]
dropout_rates = [0.2, 0.4, 0.6, 0.8]
# NOTE(review): the printed summaries below all show the same 64/32-unit
# architecture regardless of neurons_list, so these values may only label the
# models rather than change layer widths — confirm in build_models_lstm.
models_LSTM28_at = build_models_lstm(input_shape28, neurons_list, dropout_rates, 'SGD')
Model: "model_256"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_257 (InputLayer) [(None, 28, 1)] 0
lstm_240 (LSTM) (None, 28, 64) 16896
lstm_241 (LSTM) (None, 32) 12416
dropout_256 (Dropout) (None, 32) 0
dense_664 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10 neuronas y dropout 0.2:
Model: "model_256"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_257 (InputLayer) [(None, 28, 1)] 0
lstm_240 (LSTM) (None, 28, 64) 16896
lstm_241 (LSTM) (None, 32) 12416
dropout_256 (Dropout) (None, 32) 0
dense_664 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_257"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_258 (InputLayer) [(None, 28, 1)] 0
lstm_242 (LSTM) (None, 28, 64) 16896
lstm_243 (LSTM) (None, 32) 12416
dropout_257 (Dropout) (None, 32) 0
dense_665 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10 neuronas y dropout 0.4:
Model: "model_257"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_258 (InputLayer) [(None, 28, 1)] 0
lstm_242 (LSTM) (None, 28, 64) 16896
lstm_243 (LSTM) (None, 32) 12416
dropout_257 (Dropout) (None, 32) 0
dense_665 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_258"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_259 (InputLayer) [(None, 28, 1)] 0
lstm_244 (LSTM) (None, 28, 64) 16896
lstm_245 (LSTM) (None, 32) 12416
dropout_258 (Dropout) (None, 32) 0
dense_666 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10 neuronas y dropout 0.6:
Model: "model_258"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_259 (InputLayer) [(None, 28, 1)] 0
lstm_244 (LSTM) (None, 28, 64) 16896
lstm_245 (LSTM) (None, 32) 12416
dropout_258 (Dropout) (None, 32) 0
dense_666 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_259"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_260 (InputLayer) [(None, 28, 1)] 0
lstm_246 (LSTM) (None, 28, 64) 16896
lstm_247 (LSTM) (None, 32) 12416
dropout_259 (Dropout) (None, 32) 0
dense_667 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10 neuronas y dropout 0.8:
Model: "model_259"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_260 (InputLayer) [(None, 28, 1)] 0
lstm_246 (LSTM) (None, 28, 64) 16896
lstm_247 (LSTM) (None, 32) 12416
dropout_259 (Dropout) (None, 32) 0
dense_667 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_260"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_261 (InputLayer) [(None, 28, 1)] 0
lstm_248 (LSTM) (None, 28, 64) 16896
lstm_249 (LSTM) (None, 32) 12416
dropout_260 (Dropout) (None, 32) 0
dense_668 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 100 neuronas y dropout 0.2:
Model: "model_260"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_261 (InputLayer) [(None, 28, 1)] 0
lstm_248 (LSTM) (None, 28, 64) 16896
lstm_249 (LSTM) (None, 32) 12416
dropout_260 (Dropout) (None, 32) 0
dense_668 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_261"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_262 (InputLayer) [(None, 28, 1)] 0
lstm_250 (LSTM) (None, 28, 64) 16896
lstm_251 (LSTM) (None, 32) 12416
dropout_261 (Dropout) (None, 32) 0
dense_669 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 100 neuronas y dropout 0.4:
Model: "model_261"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_262 (InputLayer) [(None, 28, 1)] 0
lstm_250 (LSTM) (None, 28, 64) 16896
lstm_251 (LSTM) (None, 32) 12416
dropout_261 (Dropout) (None, 32) 0
dense_669 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_262"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_263 (InputLayer) [(None, 28, 1)] 0
lstm_252 (LSTM) (None, 28, 64) 16896
lstm_253 (LSTM) (None, 32) 12416
dropout_262 (Dropout) (None, 32) 0
dense_670 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 100 neuronas y dropout 0.6:
Model: "model_262"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_263 (InputLayer) [(None, 28, 1)] 0
lstm_252 (LSTM) (None, 28, 64) 16896
lstm_253 (LSTM) (None, 32) 12416
dropout_262 (Dropout) (None, 32) 0
dense_670 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_263"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_264 (InputLayer) [(None, 28, 1)] 0
lstm_254 (LSTM) (None, 28, 64) 16896
lstm_255 (LSTM) (None, 32) 12416
dropout_263 (Dropout) (None, 32) 0
dense_671 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 100 neuronas y dropout 0.8:
Model: "model_263"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_264 (InputLayer) [(None, 28, 1)] 0
lstm_254 (LSTM) (None, 28, 64) 16896
lstm_255 (LSTM) (None, 32) 12416
dropout_263 (Dropout) (None, 32) 0
dense_671 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_264"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_265 (InputLayer) [(None, 28, 1)] 0
lstm_256 (LSTM) (None, 28, 64) 16896
lstm_257 (LSTM) (None, 32) 12416
dropout_264 (Dropout) (None, 32) 0
dense_672 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 1000 neuronas y dropout 0.2:
Model: "model_264"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_265 (InputLayer) [(None, 28, 1)] 0
lstm_256 (LSTM) (None, 28, 64) 16896
lstm_257 (LSTM) (None, 32) 12416
dropout_264 (Dropout) (None, 32) 0
dense_672 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_265"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_266 (InputLayer) [(None, 28, 1)] 0
lstm_258 (LSTM) (None, 28, 64) 16896
lstm_259 (LSTM) (None, 32) 12416
dropout_265 (Dropout) (None, 32) 0
dense_673 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 1000 neuronas y dropout 0.4:
Model: "model_265"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_266 (InputLayer) [(None, 28, 1)] 0
lstm_258 (LSTM) (None, 28, 64) 16896
lstm_259 (LSTM) (None, 32) 12416
dropout_265 (Dropout) (None, 32) 0
dense_673 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_266"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_267 (InputLayer) [(None, 28, 1)] 0
lstm_260 (LSTM) (None, 28, 64) 16896
lstm_261 (LSTM) (None, 32) 12416
dropout_266 (Dropout) (None, 32) 0
dense_674 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 1000 neuronas y dropout 0.6:
Model: "model_266"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_267 (InputLayer) [(None, 28, 1)] 0
lstm_260 (LSTM) (None, 28, 64) 16896
lstm_261 (LSTM) (None, 32) 12416
dropout_266 (Dropout) (None, 32) 0
dense_674 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_267"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_268 (InputLayer) [(None, 28, 1)] 0
lstm_262 (LSTM) (None, 28, 64) 16896
lstm_263 (LSTM) (None, 32) 12416
dropout_267 (Dropout) (None, 32) 0
dense_675 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 1000 neuronas y dropout 0.8:
Model: "model_267"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_268 (InputLayer) [(None, 28, 1)] 0
lstm_262 (LSTM) (None, 28, 64) 16896
lstm_263 (LSTM) (None, 32) 12416
dropout_267 (Dropout) (None, 32) 0
dense_675 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_268"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_269 (InputLayer) [(None, 28, 1)] 0
lstm_264 (LSTM) (None, 28, 64) 16896
lstm_265 (LSTM) (None, 32) 12416
dropout_268 (Dropout) (None, 32) 0
dense_676 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10000 neuronas y dropout 0.2:
Model: "model_268"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_269 (InputLayer) [(None, 28, 1)] 0
lstm_264 (LSTM) (None, 28, 64) 16896
lstm_265 (LSTM) (None, 32) 12416
dropout_268 (Dropout) (None, 32) 0
dense_676 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_269"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_270 (InputLayer) [(None, 28, 1)] 0
lstm_266 (LSTM) (None, 28, 64) 16896
lstm_267 (LSTM) (None, 32) 12416
dropout_269 (Dropout) (None, 32) 0
dense_677 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10000 neuronas y dropout 0.4:
Model: "model_269"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_270 (InputLayer) [(None, 28, 1)] 0
lstm_266 (LSTM) (None, 28, 64) 16896
lstm_267 (LSTM) (None, 32) 12416
dropout_269 (Dropout) (None, 32) 0
dense_677 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_270"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_271 (InputLayer) [(None, 28, 1)] 0
lstm_268 (LSTM) (None, 28, 64) 16896
lstm_269 (LSTM) (None, 32) 12416
dropout_270 (Dropout) (None, 32) 0
dense_678 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10000 neuronas y dropout 0.6:
Model: "model_270"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_271 (InputLayer) [(None, 28, 1)] 0
lstm_268 (LSTM) (None, 28, 64) 16896
lstm_269 (LSTM) (None, 32) 12416
dropout_270 (Dropout) (None, 32) 0
dense_678 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_271"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_272 (InputLayer) [(None, 28, 1)] 0
lstm_270 (LSTM) (None, 28, 64) 16896
lstm_271 (LSTM) (None, 32) 12416
dropout_271 (Dropout) (None, 32) 0
dense_679 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10000 neuronas y dropout 0.8:
Model: "model_271"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_272 (InputLayer) [(None, 28, 1)] 0
lstm_270 (LSTM) (None, 28, 64) 16896
lstm_271 (LSTM) (None, 32) 12416
dropout_271 (Dropout) (None, 32) 0
dense_679 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Para entrenar el modelo, se utiliza la función fit() en el objeto del modelo, proporcionándole como argumentos X_train y y_train. El proceso de entrenamiento se lleva a cabo a lo largo de un número específico de épocas. Además, el parámetro batch_size especifica cuántas muestras del conjunto de entrenamiento se usarán en cada iteración de retropropagación (backpropagation).
El conjunto de datos de validación se utiliza para evaluar el rendimiento del modelo al finalizar cada época. Se emplea un objeto ModelCheckpoint que monitorea la función de pérdida en el conjunto de validación y almacena el modelo correspondiente a la época en la que esta función alcanza su valor más bajo.
Se define val_loss como el valor de la función de coste para los datos de validación cruzada y loss como el valor de la función de coste para los datos de entrenamiento. period=1 indica que el modelo se evaluará y potencialmente se guardará después de cada época.
from tensorflow.keras.callbacks import ModelCheckpoint
# Checkpoint path template: Keras interpolates the epoch number and val_loss.
save_weights = os.path.join('keras_models', 'PRSA_data_at_LSTM28_weights.{epoch:02d}-{val_loss:.4f}.keras')
# Save the full model (save_weights_only=False) only when val_loss improves,
# checked once per epoch.
save_best28_lstm_at = ModelCheckpoint(save_weights, monitor='val_loss', verbose=2,
save_best_only=True, save_weights_only=False, mode='min', save_freq='epoch');
A continuación se itera sobre cada uno de los modelos del objeto models_LSTM28_at.
import os
from joblib import dump, load

history_at_LSTM28 = []
# Train each candidate model (or reuse a cached training history if a
# previous run already saved one to disk).
for i, model in enumerate(models_LSTM28_at):
    filename = f'history_at_LSTM28_model_{i}.joblib'
    if os.path.exists(filename):
        # Cached: load() returns the history dict that was dumped earlier.
        model_history = load(filename)
        # FIX: the message printed the literal "(unknown)" instead of the file name.
        print(f"El archivo '{filename}' ya existe. Se ha cargado el historial del entrenamiento.")
    else:
        model_history = model.fit(x=X_train_at_lstm_28, y=y_train_at28, batch_size=16, epochs=20,
                                  verbose=2, callbacks=[save_best28_lstm_at],
                                  validation_data=(X_val_at_lstm_28, y_val_at28),
                                  shuffle=True)
        # Persist only the history dict; the Keras History object itself is
        # not safely serializable.
        dump(model_history.history, filename)
        # FIX: same "(unknown)" placeholder replaced with the actual file name.
        print(f"El entrenamiento del modelo {i + 1} se ha completado y el historial ha sido guardado en '{filename}'.")
    # Normalize: store a plain history dict whichever branch produced it.
    history_at_LSTM28.append(model_history if isinstance(model_history, dict) else model_history.history)
Epoch 1/20
Epoch 1: val_loss improved from inf to 6.07694, saving model to keras_models/PRSA_data_at_LSTM28_weights.01-6.0769.keras
306/306 - 3s - loss: 6.2694 - val_loss: 6.0769 - 3s/epoch - 11ms/step
Epoch 2/20
Epoch 2: val_loss improved from 6.07694 to 2.07816, saving model to keras_models/PRSA_data_at_LSTM28_weights.02-2.0782.keras
306/306 - 2s - loss: 1.7232 - val_loss: 2.0782 - 2s/epoch - 6ms/step
Epoch 3/20
Epoch 3: val_loss improved from 2.07816 to 1.31482, saving model to keras_models/PRSA_data_at_LSTM28_weights.03-1.3148.keras
306/306 - 2s - loss: 1.3172 - val_loss: 1.3148 - 2s/epoch - 7ms/step
Epoch 4/20
Epoch 4: val_loss improved from 1.31482 to 0.78508, saving model to keras_models/PRSA_data_at_LSTM28_weights.04-0.7851.keras
306/306 - 2s - loss: 1.2475 - val_loss: 0.7851 - 2s/epoch - 7ms/step
Epoch 5/20
Epoch 5: val_loss improved from 0.78508 to 0.54073, saving model to keras_models/PRSA_data_at_LSTM28_weights.05-0.5407.keras
306/306 - 2s - loss: 1.1983 - val_loss: 0.5407 - 2s/epoch - 7ms/step
Epoch 6/20
Epoch 6: val_loss did not improve from 0.54073
306/306 - 2s - loss: 1.1771 - val_loss: 0.7633 - 2s/epoch - 7ms/step
Epoch 7/20
Epoch 7: val_loss did not improve from 0.54073
306/306 - 2s - loss: 1.1656 - val_loss: 0.7754 - 2s/epoch - 6ms/step
Epoch 8/20
Epoch 8: val_loss improved from 0.54073 to 0.39809, saving model to keras_models/PRSA_data_at_LSTM28_weights.08-0.3981.keras
306/306 - 2s - loss: 1.1523 - val_loss: 0.3981 - 2s/epoch - 7ms/step
Epoch 9/20
Epoch 9: val_loss did not improve from 0.39809
306/306 - 2s - loss: 1.1772 - val_loss: 0.9098 - 2s/epoch - 6ms/step
Epoch 10/20
Epoch 10: val_loss did not improve from 0.39809
306/306 - 2s - loss: 1.1757 - val_loss: 0.6525 - 2s/epoch - 6ms/step
Epoch 11/20
Epoch 11: val_loss did not improve from 0.39809
306/306 - 2s - loss: 1.1599 - val_loss: 0.5600 - 2s/epoch - 6ms/step
Epoch 12/20
Epoch 12: val_loss did not improve from 0.39809
306/306 - 2s - loss: 1.1304 - val_loss: 0.5192 - 2s/epoch - 6ms/step
Epoch 13/20
Epoch 13: val_loss did not improve from 0.39809
306/306 - 2s - loss: 1.1642 - val_loss: 0.9182 - 2s/epoch - 7ms/step
Epoch 14/20
Epoch 14: val_loss did not improve from 0.39809
306/306 - 2s - loss: 1.1618 - val_loss: 0.4884 - 2s/epoch - 6ms/step
Epoch 15/20
Epoch 15: val_loss did not improve from 0.39809
306/306 - 2s - loss: 1.1333 - val_loss: 0.7510 - 2s/epoch - 6ms/step
Epoch 16/20
Epoch 16: val_loss did not improve from 0.39809
306/306 - 2s - loss: 1.1421 - val_loss: 0.5794 - 2s/epoch - 6ms/step
Epoch 17/20
Epoch 17: val_loss did not improve from 0.39809
306/306 - 2s - loss: 1.1563 - val_loss: 0.7627 - 2s/epoch - 6ms/step
Epoch 18/20
Epoch 18: val_loss did not improve from 0.39809
306/306 - 2s - loss: 1.1346 - val_loss: 0.9578 - 2s/epoch - 6ms/step
Epoch 19/20
Epoch 19: val_loss did not improve from 0.39809
306/306 - 2s - loss: 1.1080 - val_loss: 0.7138 - 2s/epoch - 7ms/step
Epoch 20/20
Epoch 20: val_loss did not improve from 0.39809
306/306 - 2s - loss: 1.1049 - val_loss: 0.6503 - 2s/epoch - 6ms/step
El entrenamiento del modelo 1 se ha completado y el historial ha sido guardado en 'history_at_LSTM28_model_0.joblib'.
Epoch 1/20
Epoch 1: val_loss did not improve from 0.39809
306/306 - 3s - loss: 6.6631 - val_loss: 6.4957 - 3s/epoch - 11ms/step
Epoch 2/20
Epoch 2: val_loss did not improve from 0.39809
306/306 - 2s - loss: 1.8935 - val_loss: 2.1516 - 2s/epoch - 6ms/step
Epoch 3/20
Epoch 3: val_loss did not improve from 0.39809
306/306 - 2s - loss: 1.3087 - val_loss: 1.2752 - 2s/epoch - 6ms/step
Epoch 4/20
Epoch 4: val_loss did not improve from 0.39809
306/306 - 2s - loss: 1.2450 - val_loss: 0.8948 - 2s/epoch - 7ms/step
Epoch 5/20
Epoch 5: val_loss did not improve from 0.39809
306/306 - 2s - loss: 1.2022 - val_loss: 0.7663 - 2s/epoch - 7ms/step
Epoch 6/20
Epoch 6: val_loss did not improve from 0.39809
306/306 - 2s - loss: 1.1730 - val_loss: 0.7578 - 2s/epoch - 6ms/step
Epoch 7/20
Epoch 7: val_loss did not improve from 0.39809
306/306 - 2s - loss: 1.1869 - val_loss: 0.6607 - 2s/epoch - 6ms/step
Epoch 8/20
Epoch 8: val_loss did not improve from 0.39809
306/306 - 2s - loss: 1.1909 - val_loss: 0.6889 - 2s/epoch - 7ms/step
Epoch 9/20
Epoch 9: val_loss did not improve from 0.39809
306/306 - 2s - loss: 1.1886 - val_loss: 0.5690 - 2s/epoch - 6ms/step
Epoch 10/20
Epoch 10: val_loss did not improve from 0.39809
306/306 - 2s - loss: 1.1663 - val_loss: 0.8506 - 2s/epoch - 6ms/step
Epoch 11/20
Epoch 11: val_loss did not improve from 0.39809
306/306 - 2s - loss: 1.1826 - val_loss: 0.7723 - 2s/epoch - 6ms/step
Epoch 12/20
Epoch 12: val_loss did not improve from 0.39809
306/306 - 2s - loss: 1.1585 - val_loss: 0.9272 - 2s/epoch - 6ms/step
Epoch 13/20
Epoch 13: val_loss did not improve from 0.39809
306/306 - 2s - loss: 1.1453 - val_loss: 0.6501 - 2s/epoch - 6ms/step
Epoch 14/20
Epoch 14: val_loss did not improve from 0.39809
306/306 - 2s - loss: 1.1576 - val_loss: 1.0861 - 2s/epoch - 6ms/step
Epoch 15/20
Epoch 15: val_loss did not improve from 0.39809
306/306 - 2s - loss: 1.1354 - val_loss: 0.7257 - 2s/epoch - 6ms/step
Epoch 16/20
Epoch 16: val_loss did not improve from 0.39809
306/306 - 2s - loss: 1.1211 - val_loss: 0.8027 - 2s/epoch - 6ms/step
Epoch 17/20
Epoch 17: val_loss did not improve from 0.39809
306/306 - 2s - loss: 1.1107 - val_loss: 0.4272 - 2s/epoch - 6ms/step
Epoch 18/20
Epoch 18: val_loss did not improve from 0.39809
306/306 - 2s - loss: 1.1228 - val_loss: 1.0768 - 2s/epoch - 6ms/step
Epoch 19/20
Epoch 19: val_loss did not improve from 0.39809
306/306 - 2s - loss: 1.0803 - val_loss: 0.5982 - 2s/epoch - 6ms/step
Epoch 20/20
Epoch 20: val_loss did not improve from 0.39809
306/306 - 2s - loss: 1.1151 - val_loss: 0.5450 - 2s/epoch - 7ms/step
El entrenamiento del modelo 2 se ha completado y el historial ha sido guardado en 'history_at_LSTM28_model_1.joblib'.
Epoch 1/20
Epoch 1: val_loss did not improve from 0.39809
306/306 - 3s - loss: 6.6132 - val_loss: 6.4544 - 3s/epoch - 11ms/step
Epoch 2/20
Epoch 2: val_loss did not improve from 0.39809
306/306 - 2s - loss: 1.7627 - val_loss: 2.0776 - 2s/epoch - 7ms/step
Epoch 3/20
Epoch 3: val_loss did not improve from 0.39809
306/306 - 2s - loss: 1.3373 - val_loss: 1.1221 - 2s/epoch - 7ms/step
Epoch 4/20
Epoch 4: val_loss did not improve from 0.39809
306/306 - 2s - loss: 1.2799 - val_loss: 0.7714 - 2s/epoch - 8ms/step
Epoch 5/20
Epoch 5: val_loss did not improve from 0.39809
306/306 - 2s - loss: 1.2392 - val_loss: 0.8777 - 2s/epoch - 7ms/step
Epoch 6/20
Epoch 6: val_loss did not improve from 0.39809
306/306 - 2s - loss: 1.1798 - val_loss: 0.8539 - 2s/epoch - 6ms/step
Epoch 7/20
Epoch 7: val_loss did not improve from 0.39809
306/306 - 2s - loss: 1.2244 - val_loss: 0.5361 - 2s/epoch - 7ms/step
Epoch 8/20
Epoch 8: val_loss did not improve from 0.39809
306/306 - 2s - loss: 1.1703 - val_loss: 0.6999 - 2s/epoch - 6ms/step
Epoch 9/20
Epoch 9: val_loss did not improve from 0.39809
306/306 - 2s - loss: 1.1653 - val_loss: 0.4788 - 2s/epoch - 6ms/step
Epoch 10/20
Epoch 10: val_loss improved from 0.39809 to 0.28205, saving model to keras_models/PRSA_data_at_LSTM28_weights.10-0.2820.keras
306/306 - 2s - loss: 1.1777 - val_loss: 0.2820 - 2s/epoch - 7ms/step
Epoch 11/20
Epoch 11: val_loss did not improve from 0.28205
306/306 - 2s - loss: 1.1557 - val_loss: 0.7057 - 2s/epoch - 7ms/step
Epoch 12/20
Epoch 12: val_loss improved from 0.28205 to 0.07049, saving model to keras_models/PRSA_data_at_LSTM28_weights.12-0.0705.keras
306/306 - 2s - loss: 1.1431 - val_loss: 0.0705 - 2s/epoch - 7ms/step
Epoch 13/20
Epoch 13: val_loss did not improve from 0.07049
306/306 - 2s - loss: 1.1741 - val_loss: 0.3814 - 2s/epoch - 6ms/step
Epoch 14/20
Epoch 14: val_loss did not improve from 0.07049
306/306 - 2s - loss: 1.1367 - val_loss: 0.3854 - 2s/epoch - 6ms/step
Epoch 15/20
Epoch 15: val_loss did not improve from 0.07049
306/306 - 2s - loss: 1.1277 - val_loss: 0.7433 - 2s/epoch - 7ms/step
Epoch 16/20
Epoch 16: val_loss did not improve from 0.07049
306/306 - 2s - loss: 1.1350 - val_loss: 0.1121 - 2s/epoch - 7ms/step
Epoch 17/20
Epoch 17: val_loss did not improve from 0.07049
306/306 - 2s - loss: 1.1294 - val_loss: 0.2374 - 2s/epoch - 6ms/step
Epoch 18/20
Epoch 18: val_loss did not improve from 0.07049
306/306 - 2s - loss: 1.1347 - val_loss: 0.5232 - 2s/epoch - 6ms/step
Epoch 19/20
Epoch 19: val_loss improved from 0.07049 to 0.06510, saving model to keras_models/PRSA_data_at_LSTM28_weights.19-0.0651.keras
306/306 - 2s - loss: 1.1504 - val_loss: 0.0651 - 2s/epoch - 6ms/step
Epoch 20/20
Epoch 20: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1298 - val_loss: 0.3884 - 2s/epoch - 6ms/step
El entrenamiento del modelo 3 se ha completado y el historial ha sido guardado en 'history_at_LSTM28_model_2.joblib'.
Epoch 1/20
Epoch 1: val_loss did not improve from 0.06510
306/306 - 3s - loss: 6.1599 - val_loss: 5.9525 - 3s/epoch - 11ms/step
Epoch 2/20
Epoch 2: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.7358 - val_loss: 2.1091 - 2s/epoch - 6ms/step
Epoch 3/20
Epoch 3: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.2884 - val_loss: 1.0324 - 2s/epoch - 6ms/step
Epoch 4/20
Epoch 4: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.2215 - val_loss: 0.9962 - 2s/epoch - 6ms/step
Epoch 5/20
Epoch 5: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.2223 - val_loss: 0.8080 - 2s/epoch - 6ms/step
Epoch 6/20
Epoch 6: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1875 - val_loss: 0.6144 - 2s/epoch - 6ms/step
Epoch 7/20
Epoch 7: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1653 - val_loss: 0.7800 - 2s/epoch - 6ms/step
Epoch 8/20
Epoch 8: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1751 - val_loss: 0.6239 - 2s/epoch - 6ms/step
Epoch 9/20
Epoch 9: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1781 - val_loss: 0.4822 - 2s/epoch - 6ms/step
Epoch 10/20
Epoch 10: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1744 - val_loss: 0.8834 - 2s/epoch - 6ms/step
Epoch 11/20
Epoch 11: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1576 - val_loss: 0.7279 - 2s/epoch - 6ms/step
Epoch 12/20
Epoch 12: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1584 - val_loss: 0.5910 - 2s/epoch - 6ms/step
Epoch 13/20
Epoch 13: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1413 - val_loss: 0.7309 - 2s/epoch - 6ms/step
Epoch 14/20
Epoch 14: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1400 - val_loss: 0.6851 - 2s/epoch - 6ms/step
Epoch 15/20
Epoch 15: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1476 - val_loss: 0.7337 - 2s/epoch - 6ms/step
Epoch 16/20
Epoch 16: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1307 - val_loss: 0.4824 - 2s/epoch - 6ms/step
Epoch 17/20
Epoch 17: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1252 - val_loss: 0.5360 - 2s/epoch - 7ms/step
Epoch 18/20
Epoch 18: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.0991 - val_loss: 0.4885 - 2s/epoch - 6ms/step
Epoch 19/20
Epoch 19: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1198 - val_loss: 0.4784 - 2s/epoch - 6ms/step
Epoch 20/20
Epoch 20: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1109 - val_loss: 0.4470 - 2s/epoch - 7ms/step
El entrenamiento del modelo 4 se ha completado y el historial ha sido guardado en 'history_at_LSTM28_model_3.joblib'.
Epoch 1/20
Epoch 1: val_loss did not improve from 0.06510
306/306 - 3s - loss: 7.1564 - val_loss: 7.3312 - 3s/epoch - 11ms/step
Epoch 2/20
Epoch 2: val_loss did not improve from 0.06510
306/306 - 2s - loss: 2.0941 - val_loss: 2.4293 - 2s/epoch - 7ms/step
Epoch 3/20
Epoch 3: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.3527 - val_loss: 1.2678 - 2s/epoch - 8ms/step
Epoch 4/20
Epoch 4: val_loss did not improve from 0.06510
306/306 - 3s - loss: 1.2489 - val_loss: 0.8633 - 3s/epoch - 8ms/step
Epoch 5/20
Epoch 5: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.2179 - val_loss: 0.7089 - 2s/epoch - 7ms/step
Epoch 6/20
Epoch 6: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.2110 - val_loss: 0.7799 - 2s/epoch - 7ms/step
Epoch 7/20
Epoch 7: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1995 - val_loss: 0.8350 - 2s/epoch - 7ms/step
Epoch 8/20
Epoch 8: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1862 - val_loss: 0.5729 - 2s/epoch - 7ms/step
Epoch 9/20
Epoch 9: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1963 - val_loss: 1.0190 - 2s/epoch - 7ms/step
Epoch 10/20
Epoch 10: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1538 - val_loss: 0.8097 - 2s/epoch - 7ms/step
Epoch 11/20
Epoch 11: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1496 - val_loss: 0.5658 - 2s/epoch - 7ms/step
Epoch 12/20
Epoch 12: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1713 - val_loss: 0.6036 - 2s/epoch - 7ms/step
Epoch 13/20
Epoch 13: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1388 - val_loss: 0.7272 - 2s/epoch - 7ms/step
Epoch 14/20
Epoch 14: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1530 - val_loss: 0.4419 - 2s/epoch - 7ms/step
Epoch 15/20
Epoch 15: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1411 - val_loss: 0.8496 - 2s/epoch - 7ms/step
Epoch 16/20
Epoch 16: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1529 - val_loss: 0.6449 - 2s/epoch - 8ms/step
Epoch 17/20
Epoch 17: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1126 - val_loss: 1.3918 - 2s/epoch - 7ms/step
Epoch 18/20
Epoch 18: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1165 - val_loss: 0.4319 - 2s/epoch - 7ms/step
Epoch 19/20
Epoch 19: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1078 - val_loss: 0.9877 - 2s/epoch - 7ms/step
Epoch 20/20
Epoch 20: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1216 - val_loss: 0.2638 - 2s/epoch - 7ms/step
El entrenamiento del modelo 5 se ha completado y el historial ha sido guardado en 'history_at_LSTM28_model_4.joblib'.
Epoch 1/20
Epoch 1: val_loss did not improve from 0.06510
306/306 - 3s - loss: 6.8043 - val_loss: 6.7232 - 3s/epoch - 11ms/step
Epoch 2/20
Epoch 2: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.9045 - val_loss: 2.1815 - 2s/epoch - 7ms/step
Epoch 3/20
Epoch 3: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.3014 - val_loss: 1.3468 - 2s/epoch - 7ms/step
Epoch 4/20
Epoch 4: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.2603 - val_loss: 1.0995 - 2s/epoch - 7ms/step
Epoch 5/20
Epoch 5: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.2046 - val_loss: 0.6857 - 2s/epoch - 7ms/step
Epoch 6/20
Epoch 6: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.2096 - val_loss: 0.5994 - 2s/epoch - 7ms/step
Epoch 7/20
Epoch 7: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1798 - val_loss: 0.4299 - 2s/epoch - 7ms/step
Epoch 8/20
Epoch 8: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1797 - val_loss: 0.9343 - 2s/epoch - 7ms/step
Epoch 9/20
Epoch 9: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1735 - val_loss: 0.4411 - 2s/epoch - 7ms/step
Epoch 10/20
Epoch 10: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1644 - val_loss: 1.0081 - 2s/epoch - 7ms/step
Epoch 11/20
Epoch 11: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1661 - val_loss: 0.5791 - 2s/epoch - 7ms/step
Epoch 12/20
Epoch 12: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1777 - val_loss: 0.4895 - 2s/epoch - 7ms/step
Epoch 13/20
Epoch 13: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1431 - val_loss: 0.9121 - 2s/epoch - 7ms/step
Epoch 14/20
Epoch 14: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1480 - val_loss: 0.5217 - 2s/epoch - 7ms/step
Epoch 15/20
Epoch 15: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1241 - val_loss: 0.2229 - 2s/epoch - 7ms/step
Epoch 16/20
Epoch 16: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1373 - val_loss: 0.7155 - 2s/epoch - 7ms/step
Epoch 17/20
Epoch 17: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1303 - val_loss: 0.8493 - 2s/epoch - 7ms/step
Epoch 18/20
Epoch 18: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1020 - val_loss: 0.7836 - 2s/epoch - 7ms/step
Epoch 19/20
Epoch 19: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1143 - val_loss: 0.7413 - 2s/epoch - 7ms/step
Epoch 20/20
Epoch 20: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1247 - val_loss: 0.6011 - 2s/epoch - 7ms/step
El entrenamiento del modelo 6 se ha completado y el historial ha sido guardado en 'history_at_LSTM28_model_5.joblib'.
Epoch 1/20
Epoch 1: val_loss did not improve from 0.06510
306/306 - 4s - loss: 7.2271 - val_loss: 7.1304 - 4s/epoch - 14ms/step
Epoch 2/20
Epoch 2: val_loss did not improve from 0.06510
306/306 - 2s - loss: 2.0279 - val_loss: 2.4130 - 2s/epoch - 7ms/step
Epoch 3/20
Epoch 3: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.3425 - val_loss: 1.3421 - 2s/epoch - 6ms/step
Epoch 4/20
Epoch 4: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.2383 - val_loss: 1.3187 - 2s/epoch - 6ms/step
Epoch 5/20
Epoch 5: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.2170 - val_loss: 0.6831 - 2s/epoch - 6ms/step
Epoch 6/20
Epoch 6: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.2289 - val_loss: 0.6890 - 2s/epoch - 6ms/step
Epoch 7/20
Epoch 7: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.2101 - val_loss: 0.5968 - 2s/epoch - 6ms/step
Epoch 8/20
Epoch 8: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1966 - val_loss: 0.6034 - 2s/epoch - 6ms/step
Epoch 9/20
Epoch 9: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1499 - val_loss: 0.7728 - 2s/epoch - 6ms/step
Epoch 10/20
Epoch 10: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1487 - val_loss: 0.7618 - 2s/epoch - 6ms/step
Epoch 11/20
Epoch 11: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1680 - val_loss: 0.2888 - 2s/epoch - 6ms/step
Epoch 12/20
Epoch 12: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1395 - val_loss: 0.5835 - 2s/epoch - 6ms/step
Epoch 13/20
Epoch 13: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1359 - val_loss: 0.3277 - 2s/epoch - 6ms/step
Epoch 14/20
Epoch 14: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1393 - val_loss: 0.3290 - 2s/epoch - 6ms/step
Epoch 15/20
Epoch 15: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1563 - val_loss: 0.8062 - 2s/epoch - 6ms/step
Epoch 16/20
Epoch 16: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1211 - val_loss: 0.3436 - 2s/epoch - 6ms/step
Epoch 17/20
Epoch 17: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1137 - val_loss: 0.5059 - 2s/epoch - 6ms/step
Epoch 18/20
Epoch 18: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1301 - val_loss: 0.7280 - 2s/epoch - 6ms/step
Epoch 19/20
Epoch 19: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1022 - val_loss: 0.5174 - 2s/epoch - 6ms/step
Epoch 20/20
Epoch 20: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.0977 - val_loss: 1.0941 - 2s/epoch - 6ms/step
El entrenamiento del modelo 7 se ha completado y el historial ha sido guardado en 'history_at_LSTM28_model_6.joblib'.
Epoch 1/20
Epoch 1: val_loss did not improve from 0.06510
306/306 - 3s - loss: 6.1163 - val_loss: 5.9017 - 3s/epoch - 10ms/step
Epoch 2/20
Epoch 2: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.7892 - val_loss: 2.1290 - 2s/epoch - 6ms/step
Epoch 3/20
Epoch 3: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.3041 - val_loss: 1.0985 - 2s/epoch - 6ms/step
Epoch 4/20
Epoch 4: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.2252 - val_loss: 0.8306 - 2s/epoch - 6ms/step
Epoch 5/20
Epoch 5: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.2228 - val_loss: 0.6152 - 2s/epoch - 6ms/step
Epoch 6/20
Epoch 6: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.2169 - val_loss: 0.4580 - 2s/epoch - 6ms/step
Epoch 7/20
Epoch 7: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1682 - val_loss: 0.5815 - 2s/epoch - 6ms/step
Epoch 8/20
Epoch 8: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1720 - val_loss: 0.5088 - 2s/epoch - 6ms/step
Epoch 9/20
Epoch 9: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1587 - val_loss: 0.5358 - 2s/epoch - 7ms/step
Epoch 10/20
Epoch 10: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1391 - val_loss: 1.2919 - 2s/epoch - 6ms/step
Epoch 11/20
Epoch 11: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1735 - val_loss: 0.3471 - 2s/epoch - 6ms/step
Epoch 12/20
Epoch 12: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1557 - val_loss: 0.4321 - 2s/epoch - 6ms/step
Epoch 13/20
Epoch 13: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1392 - val_loss: 0.3686 - 2s/epoch - 6ms/step
Epoch 14/20
Epoch 14: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1331 - val_loss: 0.6006 - 2s/epoch - 6ms/step
Epoch 15/20
Epoch 15: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1628 - val_loss: 0.8209 - 2s/epoch - 6ms/step
Epoch 16/20
Epoch 16: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1242 - val_loss: 1.2019 - 2s/epoch - 6ms/step
Epoch 17/20
Epoch 17: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1547 - val_loss: 0.5072 - 2s/epoch - 6ms/step
Epoch 18/20
Epoch 18: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1155 - val_loss: 0.6208 - 2s/epoch - 6ms/step
Epoch 19/20
Epoch 19: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1320 - val_loss: 0.6284 - 2s/epoch - 6ms/step
Epoch 20/20
Epoch 20: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.0969 - val_loss: 0.6774 - 2s/epoch - 6ms/step
El entrenamiento del modelo 8 se ha completado y el historial ha sido guardado en 'history_at_LSTM28_model_7.joblib'.
Epoch 1/20
Epoch 1: val_loss did not improve from 0.06510
306/306 - 3s - loss: 6.6160 - val_loss: 6.4527 - 3s/epoch - 10ms/step
Epoch 2/20
Epoch 2: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.8828 - val_loss: 2.1775 - 2s/epoch - 6ms/step
Epoch 3/20
Epoch 3: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.2851 - val_loss: 1.0850 - 2s/epoch - 6ms/step
Epoch 4/20
Epoch 4: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.2433 - val_loss: 0.8386 - 2s/epoch - 6ms/step
Epoch 5/20
Epoch 5: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.2276 - val_loss: 0.7092 - 2s/epoch - 6ms/step
Epoch 6/20
Epoch 6: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.2161 - val_loss: 0.6969 - 2s/epoch - 6ms/step
Epoch 7/20
Epoch 7: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1666 - val_loss: 0.9436 - 2s/epoch - 6ms/step
Epoch 8/20
Epoch 8: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1888 - val_loss: 0.6267 - 2s/epoch - 6ms/step
Epoch 9/20
Epoch 9: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1748 - val_loss: 0.5792 - 2s/epoch - 6ms/step
Epoch 10/20
Epoch 10: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1887 - val_loss: 1.0811 - 2s/epoch - 6ms/step
Epoch 11/20
Epoch 11: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1813 - val_loss: 0.4728 - 2s/epoch - 6ms/step
Epoch 12/20
Epoch 12: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1918 - val_loss: 0.5747 - 2s/epoch - 6ms/step
Epoch 13/20
Epoch 13: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1747 - val_loss: 0.9403 - 2s/epoch - 6ms/step
Epoch 14/20
Epoch 14: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1372 - val_loss: 0.6697 - 2s/epoch - 6ms/step
Epoch 15/20
Epoch 15: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1581 - val_loss: 0.7629 - 2s/epoch - 6ms/step
Epoch 16/20
Epoch 16: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1628 - val_loss: 0.5432 - 2s/epoch - 6ms/step
Epoch 17/20
Epoch 17: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1555 - val_loss: 0.4328 - 2s/epoch - 6ms/step
Epoch 18/20
Epoch 18: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1188 - val_loss: 0.6756 - 2s/epoch - 6ms/step
Epoch 19/20
Epoch 19: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1059 - val_loss: 0.8715 - 2s/epoch - 6ms/step
Epoch 20/20
Epoch 20: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1204 - val_loss: 0.9095 - 2s/epoch - 6ms/step
El entrenamiento del modelo 9 se ha completado y el historial ha sido guardado en 'history_at_LSTM28_model_8.joblib'.
Epoch 1/20
Epoch 1: val_loss did not improve from 0.06510
306/306 - 3s - loss: 6.9389 - val_loss: 6.9731 - 3s/epoch - 10ms/step
Epoch 2/20
Epoch 2: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.9417 - val_loss: 2.2452 - 2s/epoch - 6ms/step
Epoch 3/20
Epoch 3: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.2981 - val_loss: 0.9966 - 2s/epoch - 6ms/step
Epoch 4/20
Epoch 4: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1852 - val_loss: 0.7479 - 2s/epoch - 6ms/step
Epoch 5/20
Epoch 5: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.2069 - val_loss: 0.9490 - 2s/epoch - 6ms/step
Epoch 6/20
Epoch 6: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.2194 - val_loss: 0.7657 - 2s/epoch - 6ms/step
Epoch 7/20
Epoch 7: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1790 - val_loss: 0.8154 - 2s/epoch - 6ms/step
Epoch 8/20
Epoch 8: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.2019 - val_loss: 0.4355 - 2s/epoch - 7ms/step
Epoch 9/20
Epoch 9: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1885 - val_loss: 0.7601 - 2s/epoch - 6ms/step
Epoch 10/20
Epoch 10: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1716 - val_loss: 0.5251 - 2s/epoch - 7ms/step
Epoch 11/20
Epoch 11: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1581 - val_loss: 0.6624 - 2s/epoch - 6ms/step
Epoch 12/20
Epoch 12: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1566 - val_loss: 0.6160 - 2s/epoch - 6ms/step
Epoch 13/20
Epoch 13: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1213 - val_loss: 0.4384 - 2s/epoch - 7ms/step
Epoch 14/20
Epoch 14: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1437 - val_loss: 0.8376 - 2s/epoch - 6ms/step
Epoch 15/20
Epoch 15: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1699 - val_loss: 0.5878 - 2s/epoch - 7ms/step
Epoch 16/20
Epoch 16: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1261 - val_loss: 0.3991 - 2s/epoch - 7ms/step
Epoch 17/20
Epoch 17: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1373 - val_loss: 0.5622 - 2s/epoch - 6ms/step
Epoch 18/20
Epoch 18: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1179 - val_loss: 0.5386 - 2s/epoch - 7ms/step
Epoch 19/20
Epoch 19: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1120 - val_loss: 0.9856 - 2s/epoch - 6ms/step
Epoch 20/20
Epoch 20: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1065 - val_loss: 0.8634 - 2s/epoch - 7ms/step
El entrenamiento del modelo 10 se ha completado y el historial ha sido guardado en 'history_at_LSTM28_model_9.joblib'.
Epoch 1/20
Epoch 1: val_loss did not improve from 0.06510
306/306 - 3s - loss: 7.3363 - val_loss: 7.4014 - 3s/epoch - 11ms/step
Epoch 2/20
Epoch 2: val_loss did not improve from 0.06510
306/306 - 2s - loss: 2.1087 - val_loss: 2.5606 - 2s/epoch - 7ms/step
Epoch 3/20
Epoch 3: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.3541 - val_loss: 1.0167 - 2s/epoch - 7ms/step
Epoch 4/20
Epoch 4: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.2393 - val_loss: 0.8631 - 2s/epoch - 7ms/step
Epoch 5/20
Epoch 5: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.2072 - val_loss: 0.6873 - 2s/epoch - 7ms/step
Epoch 6/20
Epoch 6: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1944 - val_loss: 0.4899 - 2s/epoch - 6ms/step
Epoch 7/20
Epoch 7: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1777 - val_loss: 0.6574 - 2s/epoch - 6ms/step
Epoch 8/20
Epoch 8: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1667 - val_loss: 0.7128 - 2s/epoch - 6ms/step
Epoch 9/20
Epoch 9: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1886 - val_loss: 0.7616 - 2s/epoch - 6ms/step
Epoch 10/20
Epoch 10: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1710 - val_loss: 0.4117 - 2s/epoch - 6ms/step
Epoch 11/20
Epoch 11: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1480 - val_loss: 1.0593 - 2s/epoch - 6ms/step
Epoch 12/20
Epoch 12: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1745 - val_loss: 0.7732 - 2s/epoch - 6ms/step
Epoch 13/20
Epoch 13: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1647 - val_loss: 0.8009 - 2s/epoch - 6ms/step
Epoch 14/20
Epoch 14: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1406 - val_loss: 0.7819 - 2s/epoch - 6ms/step
Epoch 15/20
Epoch 15: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1277 - val_loss: 0.2483 - 2s/epoch - 6ms/step
Epoch 16/20
Epoch 16: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1157 - val_loss: 0.5514 - 2s/epoch - 6ms/step
Epoch 17/20
Epoch 17: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1126 - val_loss: 0.5245 - 2s/epoch - 6ms/step
Epoch 18/20
Epoch 18: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1247 - val_loss: 0.5349 - 2s/epoch - 6ms/step
Epoch 19/20
Epoch 19: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1283 - val_loss: 0.4340 - 2s/epoch - 6ms/step
Epoch 20/20
Epoch 20: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1198 - val_loss: 0.2635 - 2s/epoch - 6ms/step
El entrenamiento del modelo 11 se ha completado y el historial ha sido guardado en 'history_at_LSTM28_model_10.joblib'.
Epoch 1/20
Epoch 1: val_loss did not improve from 0.06510
306/306 - 3s - loss: 6.8895 - val_loss: 6.7842 - 3s/epoch - 10ms/step
Epoch 2/20
Epoch 2: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.9924 - val_loss: 2.3289 - 2s/epoch - 7ms/step
Epoch 3/20
Epoch 3: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.3607 - val_loss: 1.1903 - 2s/epoch - 6ms/step
Epoch 4/20
Epoch 4: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.2816 - val_loss: 0.7368 - 2s/epoch - 6ms/step
Epoch 5/20
Epoch 5: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.2605 - val_loss: 1.4995 - 2s/epoch - 6ms/step
Epoch 6/20
Epoch 6: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.2106 - val_loss: 1.1060 - 2s/epoch - 6ms/step
Epoch 7/20
Epoch 7: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.2071 - val_loss: 1.0814 - 2s/epoch - 6ms/step
Epoch 8/20
Epoch 8: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1878 - val_loss: 0.6868 - 2s/epoch - 7ms/step
Epoch 9/20
Epoch 9: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1851 - val_loss: 0.4679 - 2s/epoch - 6ms/step
Epoch 10/20
Epoch 10: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1423 - val_loss: 0.8036 - 2s/epoch - 6ms/step
Epoch 11/20
Epoch 11: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1495 - val_loss: 0.4932 - 2s/epoch - 6ms/step
Epoch 12/20
Epoch 12: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1471 - val_loss: 0.7090 - 2s/epoch - 7ms/step
Epoch 13/20
Epoch 13: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1621 - val_loss: 0.6092 - 2s/epoch - 6ms/step
Epoch 14/20
Epoch 14: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1322 - val_loss: 0.6305 - 2s/epoch - 6ms/step
Epoch 15/20
Epoch 15: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1325 - val_loss: 0.7910 - 2s/epoch - 6ms/step
Epoch 16/20
Epoch 16: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1458 - val_loss: 0.7030 - 2s/epoch - 6ms/step
Epoch 17/20
Epoch 17: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.0926 - val_loss: 0.1922 - 2s/epoch - 6ms/step
Epoch 18/20
Epoch 18: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.0830 - val_loss: 0.5970 - 2s/epoch - 6ms/step
Epoch 19/20
Epoch 19: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.0771 - val_loss: 0.5802 - 2s/epoch - 6ms/step
Epoch 20/20
Epoch 20: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1052 - val_loss: 0.3837 - 2s/epoch - 6ms/step
El entrenamiento del modelo 12 se ha completado y el historial ha sido guardado en 'history_at_LSTM28_model_11.joblib'.
Epoch 1/20
Epoch 1: val_loss did not improve from 0.06510
306/306 - 3s - loss: 8.4500 - val_loss: 8.7846 - 3s/epoch - 10ms/step
Epoch 2/20
Epoch 2: val_loss did not improve from 0.06510
306/306 - 2s - loss: 2.5019 - val_loss: 3.1387 - 2s/epoch - 7ms/step
Epoch 3/20
Epoch 3: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.4196 - val_loss: 1.3810 - 2s/epoch - 7ms/step
Epoch 4/20
Epoch 4: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.2708 - val_loss: 0.8895 - 2s/epoch - 7ms/step
Epoch 5/20
Epoch 5: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.2371 - val_loss: 0.7904 - 2s/epoch - 7ms/step
Epoch 6/20
Epoch 6: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.2270 - val_loss: 0.7502 - 2s/epoch - 7ms/step
Epoch 7/20
Epoch 7: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1750 - val_loss: 0.8076 - 2s/epoch - 7ms/step
Epoch 8/20
Epoch 8: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1891 - val_loss: 0.7043 - 2s/epoch - 7ms/step
Epoch 9/20
Epoch 9: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1748 - val_loss: 1.3279 - 2s/epoch - 7ms/step
Epoch 10/20
Epoch 10: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1660 - val_loss: 0.5368 - 2s/epoch - 7ms/step
Epoch 11/20
Epoch 11: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1601 - val_loss: 0.5168 - 2s/epoch - 7ms/step
Epoch 12/20
Epoch 12: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1503 - val_loss: 0.6635 - 2s/epoch - 7ms/step
Epoch 13/20
Epoch 13: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1118 - val_loss: 1.0881 - 2s/epoch - 7ms/step
Epoch 14/20
Epoch 14: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1603 - val_loss: 0.4644 - 2s/epoch - 7ms/step
Epoch 15/20
Epoch 15: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1465 - val_loss: 0.6079 - 2s/epoch - 7ms/step
Epoch 16/20
Epoch 16: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1039 - val_loss: 0.3656 - 2s/epoch - 7ms/step
Epoch 17/20
Epoch 17: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1189 - val_loss: 0.8142 - 2s/epoch - 7ms/step
Epoch 18/20
Epoch 18: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1231 - val_loss: 0.7524 - 2s/epoch - 7ms/step
Epoch 19/20
Epoch 19: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1134 - val_loss: 0.5564 - 2s/epoch - 7ms/step
Epoch 20/20
Epoch 20: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1020 - val_loss: 0.4772 - 2s/epoch - 7ms/step
El entrenamiento del modelo 13 se ha completado y el historial ha sido guardado en 'history_at_LSTM28_model_12.joblib'.
Epoch 1/20
Epoch 1: val_loss did not improve from 0.06510
306/306 - 3s - loss: 7.1928 - val_loss: 7.4358 - 3s/epoch - 10ms/step
Epoch 2/20
Epoch 2: val_loss did not improve from 0.06510
306/306 - 2s - loss: 2.1065 - val_loss: 2.5417 - 2s/epoch - 7ms/step
Epoch 3/20
Epoch 3: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.3314 - val_loss: 1.1352 - 2s/epoch - 7ms/step
Epoch 4/20
Epoch 4: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.2451 - val_loss: 0.8430 - 2s/epoch - 7ms/step
Epoch 5/20
Epoch 5: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.2127 - val_loss: 0.8258 - 2s/epoch - 6ms/step
Epoch 6/20
Epoch 6: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1980 - val_loss: 1.1839 - 2s/epoch - 7ms/step
Epoch 7/20
Epoch 7: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1751 - val_loss: 0.4222 - 2s/epoch - 7ms/step
Epoch 8/20
Epoch 8: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1856 - val_loss: 0.7571 - 2s/epoch - 6ms/step
Epoch 9/20
Epoch 9: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1591 - val_loss: 0.3765 - 2s/epoch - 7ms/step
Epoch 10/20
Epoch 10: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1382 - val_loss: 0.8703 - 2s/epoch - 6ms/step
Epoch 11/20
Epoch 11: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1566 - val_loss: 0.6531 - 2s/epoch - 7ms/step
Epoch 12/20
Epoch 12: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1637 - val_loss: 1.0157 - 2s/epoch - 6ms/step
Epoch 13/20
Epoch 13: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1483 - val_loss: 0.6543 - 2s/epoch - 6ms/step
Epoch 14/20
Epoch 14: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1230 - val_loss: 1.1569 - 2s/epoch - 7ms/step
Epoch 15/20
Epoch 15: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1218 - val_loss: 0.7120 - 2s/epoch - 7ms/step
Epoch 16/20
Epoch 16: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1401 - val_loss: 0.3919 - 2s/epoch - 7ms/step
Epoch 17/20
Epoch 17: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1236 - val_loss: 0.7679 - 2s/epoch - 7ms/step
Epoch 18/20
Epoch 18: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1234 - val_loss: 0.5686 - 2s/epoch - 7ms/step
Epoch 19/20
Epoch 19: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1058 - val_loss: 0.6046 - 2s/epoch - 8ms/step
Epoch 20/20
Epoch 20: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.0958 - val_loss: 1.3687 - 2s/epoch - 7ms/step
El entrenamiento del modelo 14 se ha completado y el historial ha sido guardado en 'history_at_LSTM28_model_13.joblib'.
Epoch 1/20
Epoch 1: val_loss did not improve from 0.06510
306/306 - 3s - loss: 7.0992 - val_loss: 6.9412 - 3s/epoch - 11ms/step
Epoch 2/20
Epoch 2: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.9664 - val_loss: 2.3431 - 2s/epoch - 7ms/step
Epoch 3/20
Epoch 3: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.3055 - val_loss: 1.2204 - 2s/epoch - 7ms/step
Epoch 4/20
Epoch 4: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.2269 - val_loss: 0.7551 - 2s/epoch - 7ms/step
Epoch 5/20
Epoch 5: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.2091 - val_loss: 0.6739 - 2s/epoch - 7ms/step
Epoch 6/20
Epoch 6: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1822 - val_loss: 0.7142 - 2s/epoch - 7ms/step
Epoch 7/20
Epoch 7: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.2080 - val_loss: 0.6342 - 2s/epoch - 7ms/step
Epoch 8/20
Epoch 8: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1645 - val_loss: 0.9251 - 2s/epoch - 7ms/step
Epoch 9/20
Epoch 9: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1951 - val_loss: 0.6351 - 2s/epoch - 7ms/step
Epoch 10/20
Epoch 10: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1936 - val_loss: 0.9168 - 2s/epoch - 7ms/step
Epoch 11/20
Epoch 11: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1632 - val_loss: 0.9754 - 2s/epoch - 7ms/step
Epoch 12/20
Epoch 12: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1627 - val_loss: 0.7957 - 2s/epoch - 7ms/step
Epoch 13/20
Epoch 13: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1454 - val_loss: 0.8145 - 2s/epoch - 7ms/step
Epoch 14/20
Epoch 14: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1575 - val_loss: 0.3253 - 2s/epoch - 7ms/step
Epoch 15/20
Epoch 15: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1298 - val_loss: 0.4607 - 2s/epoch - 7ms/step
Epoch 16/20
Epoch 16: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1427 - val_loss: 1.2704 - 2s/epoch - 7ms/step
Epoch 17/20
Epoch 17: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1240 - val_loss: 0.6492 - 2s/epoch - 7ms/step
Epoch 18/20
Epoch 18: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1237 - val_loss: 0.7877 - 2s/epoch - 6ms/step
Epoch 19/20
Epoch 19: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.0905 - val_loss: 0.2337 - 2s/epoch - 7ms/step
Epoch 20/20
Epoch 20: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1109 - val_loss: 1.0393 - 2s/epoch - 7ms/step
El entrenamiento del modelo 15 se ha completado y el historial ha sido guardado en 'history_at_LSTM28_model_14.joblib'.
Epoch 1/20
Epoch 1: val_loss did not improve from 0.06510
306/306 - 4s - loss: 7.2793 - val_loss: 7.3294 - 4s/epoch - 13ms/step
Epoch 2/20
Epoch 2: val_loss did not improve from 0.06510
306/306 - 2s - loss: 2.0532 - val_loss: 2.3122 - 2s/epoch - 6ms/step
Epoch 3/20
Epoch 3: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.2949 - val_loss: 1.1371 - 2s/epoch - 6ms/step
Epoch 4/20
Epoch 4: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.2403 - val_loss: 0.8834 - 2s/epoch - 7ms/step
Epoch 5/20
Epoch 5: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.2219 - val_loss: 0.5835 - 2s/epoch - 6ms/step
Epoch 6/20
Epoch 6: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1960 - val_loss: 0.3522 - 2s/epoch - 7ms/step
Epoch 7/20
Epoch 7: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1918 - val_loss: 0.4669 - 2s/epoch - 6ms/step
Epoch 8/20
Epoch 8: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1926 - val_loss: 0.2599 - 2s/epoch - 6ms/step
Epoch 9/20
Epoch 9: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1763 - val_loss: 0.6408 - 2s/epoch - 6ms/step
Epoch 10/20
Epoch 10: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1536 - val_loss: 0.5140 - 2s/epoch - 6ms/step
Epoch 11/20
Epoch 11: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1499 - val_loss: 0.7299 - 2s/epoch - 7ms/step
Epoch 12/20
Epoch 12: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1387 - val_loss: 0.5241 - 2s/epoch - 7ms/step
Epoch 13/20
Epoch 13: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1530 - val_loss: 0.2742 - 2s/epoch - 6ms/step
Epoch 14/20
Epoch 14: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1514 - val_loss: 0.4823 - 2s/epoch - 7ms/step
Epoch 15/20
Epoch 15: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1562 - val_loss: 0.9326 - 2s/epoch - 6ms/step
Epoch 16/20
Epoch 16: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1401 - val_loss: 0.4993 - 2s/epoch - 7ms/step
Epoch 17/20
Epoch 17: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1394 - val_loss: 0.5373 - 2s/epoch - 6ms/step
Epoch 18/20
Epoch 18: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1237 - val_loss: 0.6216 - 2s/epoch - 6ms/step
Epoch 19/20
Epoch 19: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1097 - val_loss: 0.6531 - 2s/epoch - 6ms/step
Epoch 20/20
Epoch 20: val_loss did not improve from 0.06510
306/306 - 2s - loss: 1.1283 - val_loss: 1.0690 - 2s/epoch - 7ms/step
El entrenamiento del modelo 16 se ha completado y el historial ha sido guardado en 'history_at_LSTM28_model_15.joblib'.
En este caso, el parámetro verbose=2 indica que se mostrará una línea por cada época durante el entrenamiento. Los valores posibles son: 0 para no mostrar información, 1 para visualizar una barra de progreso y 2 para ver un registro por época.
Además, las muestras se mezclan de manera aleatoria antes de cada época, gracias a la opción shuffle=True.
Una vez completado el entrenamiento, se realizan predicciones sobre el Precio del Bitcoin utilizando los mejores modelos guardados. Las predicciones generadas, que corresponden al Precio del Bitcoin escalado, son transformadas inversamente para recuperar los valores originales. Asimismo, se evalúa la calidad del ajuste mediante métricas como SSE, MAPE, MAD, MSD y R².
import os
import re
from tensorflow.keras.models import load_model
import numpy as np

# Directory containing the saved Keras checkpoints.
model_dir = 'keras_models'
files = os.listdir(model_dir)

# Checkpoint filenames encode epoch and validation loss:
#   PRSA_data_at_LSTM28_weights.<epoch>-<val_loss>.keras
pattern = r"PRSA_data_at_LSTM28_weights\.(\d+)-([\d\.]+)\.keras"

best_val_loss = float('inf')
best_model_file = None
best28_lstm_at = None

# Scan every file and keep the checkpoint with the lowest val_loss.
# re.fullmatch (instead of re.match) guarantees the whole filename
# matches, so e.g. "....keras.bak" is not picked up by mistake.
for file in files:
    match = re.fullmatch(pattern, file)
    if match:
        val_loss = float(match.group(2))  # validation loss from filename
        if val_loss < best_val_loss:
            best_val_loss = val_loss
            best_model_file = file  # remember the best checkpoint so far

# Load the best model if one was found.
if best_model_file:
    best_model_path = os.path.join(model_dir, best_model_file)
    print(f"Cargando el mejor modelo: {best_model_file} con val_loss: {best_val_loss}")
    best28_lstm_at = load_model(best_model_path)  # load the checkpoint
    if best28_lstm_at is None:
        print("Error: No se pudo cargar el mejor modelo.")
else:
    print("No se encontraron archivos de modelos que coincidan con el patrón.")
Cargando el mejor modelo: PRSA_data_at_LSTM28_weights.19-0.0651.keras con val_loss: 0.0651
A continuación estimamos las predicciones del modelo LSTM para la época en donde la función de pérdida alcanza su valor más bajo.
print("Archivos en el directorio 'keras_models':", files)
Archivos en el directorio 'keras_models': ['PRSA_data_vola14_MLP7_weights.04-0.0018.keras', 'PRSA_data_price_MLP21_weights.01-3579.4128.keras', 'PRSA_data_price_LSTM21_weights.01-43062.3047.keras', 'PRSA_data_price_LSTM28_weights.02-42892.2305.keras', 'PRSA_data_price_LSTM_weights.19-67016.3047.keras', 'PRSA_data_at_LSTM28_weights.19-0.0651.keras', 'PRSA_data_vola14_MLP21_weights.20-0.0011.keras', 'PRSA_data_price_LSTM14_weights.06-51394.8750.keras', 'PRSA_data_vola21_LSTM7_weights.04-0.0012.keras', 'PRSA_data_vola7_MLP28_weights.07-0.0033.keras', 'PRSA_data_price_LSTM21_weights.02-43052.0469.keras', 'PRSA_data_at_MLP7_weights.80-0.0780.keras', 'PRSA_data_vola21_LSTM28_weights.02-0.0022.keras', 'PRSA_data_vola21_LSTM14_weights.04-0.0011.keras', 'PRSA_data_vola7_LSTM21_weights.18-0.0029.keras', 'PRSA_data_vola21_LSTM7_weights.08-0.0013.keras', 'PRSA_data_vola28_MLP14_weights.04-0.0013.keras', 'PRSA_data_price_MLP14_weights.52-626.2623.keras', 'PRSA_data_vola7_MLP7_weights.15-0.0041.keras', 'PRSA_data_price_LSTM14_weights.13-51325.8516.keras', 'PRSA_data_vola14_MLP7_weights.02-0.0023.keras', 'PRSA_data_price_LSTM21_weights.11-42962.6562.keras', 'PRSA_data_vola7_LSTM28_weights.10-0.0037.keras', 'PRSA_data_at_LSTM_weights.04-1.2071.keras', 'PRSA_data_price_MLP7_weights.50-1342.5273.keras', 'PRSA_data_price_MLP14_weights.03-5108.7837.keras', 'PRSA_data_vola28_MLP7_weights.14-0.0012.keras', 'PRSA_data_at_MLP7_weights.11-2.4883.keras', 'PRSA_data_price_LSTM28_weights.08-42832.5430.keras', 'PRSA_data_price_MLP28_weights.01-9242.6553.keras', 'PRSA_data_vola21_LSTM28_weights.04-0.0019.keras', 'PRSA_data_price_MLP21_weights.16-1013.2251.keras', 'PRSA_data_price_MLP28_weights.13-1042.5178.keras', 'PRSA_data_vola28_MLP28_weights.34-0.0006.keras', 'PRSA_data_at_MLP7_weights.01-19.6635.keras', 'PRSA_data_at_MLP7_weights.19-1.8379.keras', 'PRSA_data_at_MLP7_weights.12-2.3991.keras', 'PRSA_data_price_MLP7_weights.15-1346.3275.keras', 
'PRSA_data_vola14_MLP28_weights.02-0.0024.keras', 'PRSA_data_at_LSTM21_weights.05-0.7513.keras', 'PRSA_data_vola14_MLP28_weights.07-0.0017.keras', 'PRSA_data_at_MLP28_weights.05-0.0930.keras', 'PRSA_data_at_LSTM28_weights.02-2.0782.keras', 'PRSA_data_price_LSTM14_weights.20-51256.7266.keras', 'PRSA_data_vola21_LSTM21_weights.05-0.0013.keras', 'PRSA_data_price_MLP14_weights.12-1304.7009.keras', 'PRSA_data_vola7_LSTM14_weights.09-0.0044.keras', 'PRSA_data_vola28_MLP28_weights.50-0.0006.keras', 'PRSA_data_vola7_MLP14_weights.15-0.0020.keras', 'PRSA_data_price_MLP14_weights.08-1386.5365.keras', 'PRSA_data_price_LSTM21_weights.09-42982.2383.keras', 'PRSA_data_price_MLP28_weights.11-7366.8994.keras', 'PRSA_data_vola14_MLP7_weights.18-0.0015.keras', 'PRSA_data_price_MLP7_weights.14-1734.7238.keras', 'PRSA_data_at_LSTM28_weights.03-1.3148.keras', 'PRSA_data_price_LSTM14_weights.15-51306.2070.keras', 'PRSA_data_vola21_MLP28_weights.25-0.0010.keras', 'PRSA_data_at_LSTM28_weights.10-0.2820.keras', 'PRSA_data_at_LSTM14_weights.02-2.2175.keras', 'PRSA_data_vola28_LSTM7_weights.08-0.0014.keras', 'PRSA_data_price_MLP28_weights.10-1107.4047.keras', 'PRSA_data_vola28_MLP7_weights.01-0.0068.keras', 'PRSA_data_vola21_LSTM21_weights.03-0.0015.keras', 'PRSA_data_price_MLP7_weights.01-9253.3047.keras', 'PRSA_data_vola21_MLP14_weights.31-0.0007.keras', 'PRSA_data_price_LSTM28_weights.12-42793.6172.keras', 'PRSA_data_price_LSTM14_weights.09-51365.0000.keras', 'PRSA_data_vola28_MLP28_weights.42-0.0006.keras', 'PRSA_data_price_MLP7_weights.05-4792.5054.keras', 'PRSA_data_vola21_LSTM_weights.01-0.0024.keras', 'PRSA_data_price_LSTM28_weights.17-42745.0352.keras', 'PRSA_data_vola28_MLP28_weights.45-0.0006.keras', 'PRSA_data_vola28_MLP14_weights.08-0.0010.keras', 'PRSA_data_at_LSTM_weights.11-0.6498.keras', 'PRSA_data_vola28_MLP28_weights.39-0.0007.keras', 'PRSA_data_at_MLP21_weights.11-0.0326.keras', 'PRSA_data_price_LSTM_weights.20-67006.4141.keras', 
'PRSA_data_vola28_MLP7_weights.06-0.0014.keras', 'PRSA_data_at_LSTM14_weights.06-0.8145.keras', 'PRSA_data_at_MLP28_weights.01-2.1902.keras', 'PRSA_data_price_MLP14_weights.46-814.1992.keras', '.DS_Store', 'PRSA_data_at_LSTM21_weights.01-5.9452.keras', 'PRSA_data_vola28_LSTM28_weights.12-0.0006.keras', 'PRSA_data_price_LSTM14_weights.18-51276.5625.keras', 'PRSA_data_vola21_LSTM14_weights.01-0.0078.keras', 'PRSA_data_vola14_MLP21_weights.04-0.0015.keras', 'PRSA_data_vola21_LSTM7_weights.02-0.0012.keras', 'PRSA_data_vola21_LSTM14_weights.02-0.0011.keras', 'PRSA_data_at_MLP7_weights.09-2.9793.keras', 'PRSA_data_vola21_MLP14_weights.06-0.0007.keras', 'PRSA_data_vola14_MLP21_weights.78-0.0010.keras', 'PRSA_data_price_MLP14_weights.64-711.9107.keras', 'PRSA_data_vola14_MLP28_weights.31-0.0011.keras', 'PRSA_data_vola14_LSTM_weights.08-0.0020.keras', 'PRSA_data_vola14_LSTM21_weights.02-0.0018.keras', 'PRSA_data_at_LSTM21_weights.02-2.0083.keras', 'PRSA_data_vola14_LSTM28_weights.17-0.0014.keras', 'PRSA_data_vola14_MLP14_weights.29-0.0005.keras', 'PRSA_data_price_LSTM_weights.03-67175.7734.keras', 'PRSA_data_vola14_LSTM_weights.01-0.0068.keras', 'PRSA_data_price_LSTM28_weights.11-42803.3086.keras', 'PRSA_data_price_MLP28_weights.15-7107.3306.keras', 'PRSA_data_vola14_MLP21_weights.49-0.0010.keras', 'PRSA_data_vola28_MLP7_weights.07-0.0010.keras', 'PRSA_data_vola21_LSTM7_weights.02-0.0010.keras', 'PRSA_data_at_LSTM14_weights.15-0.4082.keras', 'PRSA_data_vola28_MLP7_weights.19-0.0011.keras', 'PRSA_data_at_LSTM21_weights.19-0.3670.keras', 'PRSA_data_vola14_MLP14_weights.06-0.0006.keras', 'PRSA_data_vola14_LSTM21_weights.06-0.0012.keras', 'PRSA_data_price_LSTM_weights.17-67036.2891.keras', 'PRSA_data_vola28_MLP7_weights.03-0.0018.keras', 'PRSA_data_vola7_MLP28_weights.02-0.0039.keras', 'PRSA_data_vola7_LSTM28_weights.16-0.0035.keras', 'PRSA_data_vola21_MLP7_weights.20-0.0011.keras', 'PRSA_data_vola21_LSTM7_weights.01-0.0018.keras', 
'PRSA_data_vola21_MLP21_weights.33-0.0009.keras', 'PRSA_data_price_MLP21_weights.64-870.7020.keras', 'PRSA_data_vola21_MLP14_weights.22-0.0007.keras', 'PRSA_data_vola7_MLP14_weights.14-0.0020.keras', 'PRSA_data_price_LSTM21_weights.17-42903.9648.keras', 'PRSA_data_vola21_MLP14_weights.01-0.0021.keras', 'PRSA_data_vola21_LSTM7_weights.16-0.0011.keras', 'PRSA_data_at_LSTM21_weights.17-0.5070.keras', 'PRSA_data_at_LSTM14_weights.11-0.6656.keras', 'PRSA_data_vola28_MLP7_weights.02-0.0019.keras', 'PRSA_data_vola14_MLP28_weights.05-0.0018.keras', 'PRSA_data_vola14_LSTM14_weights.08-0.0005.keras', 'PRSA_data_at_MLP21_weights.02-0.8171.keras', 'PRSA_data_price_MLP28_weights.02-8134.2202.keras', 'PRSA_data_price_LSTM28_weights.06-42852.3320.keras', 'PRSA_data_vola7_MLP14_weights.02-0.0036.keras', 'PRSA_data_vola14_LSTM28_weights.01-0.0026.keras', 'PRSA_data_at_MLP7_weights.04-9.8837.keras', 'PRSA_data_price_MLP28_weights.16-1274.4368.keras', 'PRSA_data_price_LSTM21_weights.07-43002.0078.keras', 'PRSA_data_vola7_MLP28_weights.23-0.0032.keras', 'PRSA_data_price_LSTM_weights.01-67196.3359.keras', 'PRSA_data_at_MLP28_weights.02-1.9156.keras', 'PRSA_data_vola14_MLP21_weights.02-0.0016.keras', 'PRSA_data_vola28_MLP14_weights.14-0.0007.keras', 'PRSA_data_price_LSTM28_weights.07-42842.4375.keras', 'PRSA_data_price_MLP14_weights.37-825.1805.keras', 'PRSA_data_vola7_MLP14_weights.07-0.0021.keras', 'PRSA_data_price_LSTM14_weights.04-51414.8945.keras', 'PRSA_data_price_MLP7_weights.42-1446.1339.keras', 'PRSA_data_price_MLP21_weights.57-932.6497.keras', 'PRSA_data_price_LSTM14_weights.19-51266.6133.keras', 'PRSA_data_vola21_MLP14_weights.34-0.0008.keras', 'PRSA_data_price_MLP14_weights.01-10485.8262.keras', 'PRSA_data_vola21_LSTM7_weights.08-0.0012.keras', 'PRSA_data_at_LSTM_weights.02-2.4966.keras', 'PRSA_data_at_LSTM28_weights.08-0.3981.keras', 'PRSA_data_vola21_LSTM28_weights.15-0.0009.keras', 'PRSA_data_price_LSTM_weights.20-67006.3047.keras', 
'PRSA_data_price_LSTM_weights.14-67066.1016.keras', 'PRSA_data_price_LSTM21_weights.19-42884.2422.keras', 'PRSA_data_vola28_LSTM21_weights.18-0.0008.keras', 'PRSA_data_vola21_LSTM7_weights.04-0.0013.keras', 'PRSA_data_price_LSTM28_weights.20-42714.5508.keras', 'PRSA_data_at_LSTM14_weights.13-0.5609.keras', 'PRSA_data_vola28_LSTM28_weights.04-0.0009.keras', 'PRSA_data_vola14_MLP21_weights.01-0.0118.keras', 'PRSA_data_vola7_MLP21_weights.02-0.0032.keras', 'PRSA_data_at_MLP7_weights.74-0.2838.keras', 'PRSA_data_price_MLP28_weights.17-1217.0461.keras', 'PRSA_data_at_MLP21_weights.16-1.1973.keras', 'PRSA_data_vola7_MLP7_weights.02-0.0036.keras', 'PRSA_data_vola21_LSTM7_weights.02-0.0015.keras', 'PRSA_data_vola14_LSTM14_weights.04-0.0006.keras', 'PRSA_data_vola28_LSTM28_weights.18-0.0006.keras', 'PRSA_data_price_MLP21_weights.74-805.9489.keras', 'PRSA_data_vola21_MLP14_weights.02-0.0011.keras', 'PRSA_data_vola28_MLP21_weights.02-0.0012.keras', 'PRSA_data_vola21_LSTM_weights.11-0.0014.keras', 'PRSA_data_vola7_LSTM14_weights.14-0.0036.keras', 'PRSA_data_at_MLP28_weights.03-1.7712.keras', 'PRSA_data_price_LSTM21_weights.20-42874.3555.keras', 'PRSA_data_vola28_LSTM7_weights.20-0.0010.keras', 'PRSA_data_vola28_MLP21_weights.04-0.0009.keras', 'PRSA_data_at_MLP7_weights.08-3.6716.keras', 'PRSA_data_at_MLP21_weights.04-0.0734.keras', 'PRSA_data_vola21_LSTM28_weights.03-0.0021.keras', 'PRSA_data_vola21_MLP14_weights.35-0.0008.keras', 'PRSA_data_vola28_LSTM7_weights.11-0.0012.keras', 'PRSA_data_price_LSTM28_weights.04-42872.1953.keras', 'PRSA_data_at_LSTM_weights.08-0.4803.keras', 'PRSA_data_vola28_MLP14_weights.05-0.0010.keras', 'PRSA_data_price_LSTM14_weights.14-51316.0625.keras', 'PRSA_data_vola7_MLP14_weights.01-0.0101.keras', 'PRSA_data_vola14_LSTM28_weights.17-0.0015.keras', 'PRSA_data_vola7_LSTM_weights.01-0.0040.keras', 'PRSA_data_vola14_MLP21_weights.09-0.0011.keras', 'PRSA_data_price_LSTM21_weights.05-43021.9570.keras', 'PRSA_data_vola28_MLP28_weights.03-0.0007.keras', 
'PRSA_data_vola7_LSTM14_weights.10-0.0042.keras', 'PRSA_data_price_LSTM_weights.07-67135.3359.keras', 'PRSA_data_vola7_MLP14_weights.18-0.0020.keras', 'PRSA_data_price_LSTM21_weights.18-42894.1055.keras', 'PRSA_data_vola7_MLP7_weights.01-0.0042.keras', 'PRSA_data_vola14_LSTM_weights.05-0.0024.keras', 'PRSA_data_vola21_MLP14_weights.27-0.0008.keras', 'PRSA_data_vola28_MLP28_weights.02-0.0018.keras', 'PRSA_data_vola21_LSTM7_weights.03-0.0017.keras', 'PRSA_data_price_LSTM_weights.10-67105.4297.keras', 'PRSA_data_vola28_MLP7_weights.19-0.0010.keras', 'PRSA_data_vola7_MLP21_weights.03-0.0030.keras', 'PRSA_data_at_MLP7_weights.06-5.3724.keras', 'PRSA_data_at_LSTM_weights.05-1.1226.keras', 'PRSA_data_vola21_LSTM7_weights.01-0.0026.keras', 'PRSA_data_vola21_LSTM28_weights.05-0.0018.keras', 'PRSA_data_vola14_MLP21_weights.05-0.0012.keras', 'PRSA_data_vola21_LSTM28_weights.14-0.0010.keras', 'PRSA_data_vola21_MLP14_weights.08-0.0010.keras', 'PRSA_data_vola21_MLP28_weights.47-0.0008.keras', 'PRSA_data_price_LSTM14_weights.16-51296.3633.keras', 'PRSA_data_at_MLP7_weights.02-16.3421.keras', 'PRSA_data_vola7_LSTM21_weights.15-0.0030.keras', 'PRSA_data_vola21_MLP21_weights.09-0.0009.keras', 'PRSA_data_at_LSTM14_weights.01-6.2168.keras', 'PRSA_data_vola28_LSTM21_weights.01-0.0016.keras', 'PRSA_data_vola21_LSTM21_weights.16-0.0009.keras', 'PRSA_data_vola21_LSTM28_weights.07-0.0016.keras', 'PRSA_data_vola21_MLP28_weights.19-0.0010.keras', 'PRSA_data_vola21_LSTM28_weights.14-0.0009.keras', 'PRSA_data_vola14_MLP7_weights.09-0.0018.keras', 'PRSA_data_vola21_MLP21_weights.02-0.0011.keras', 'PRSA_data_vola28_LSTM14_weights.03-0.0007.keras', 'PRSA_data_vola21_LSTM_weights.09-0.0015.keras', 'PRSA_data_vola21_LSTM7_weights.09-0.0012.keras', 'PRSA_data_vola28_MLP21_weights.65-0.0007.keras', 'PRSA_data_vola21_MLP28_weights.15-0.0011.keras', 'PRSA_data_at_LSTM28_weights.04-0.7851.keras', 'PRSA_data_at_MLP7_weights.42-0.8694.keras', 'PRSA_data_vola21_LSTM28_weights.18-0.0008.keras', 
'PRSA_data_vola21_MLP14_weights.04-0.0008.keras', 'PRSA_data_vola7_LSTM14_weights.02-0.0040.keras', 'PRSA_data_vola21_LSTM14_weights.12-0.0007.keras', 'PRSA_data_vola7_LSTM21_weights.15-0.0029.keras', 'PRSA_data_vola21_MLP28_weights.01-0.0026.keras', 'PRSA_data_price_LSTM_weights.13-67075.9453.keras', 'PRSA_data_vola14_MLP7_weights.01-0.0041.keras', 'PRSA_data_price_MLP21_weights.02-1476.1034.keras', 'PRSA_data_vola14_LSTM28_weights.19-0.0014.keras', 'PRSA_data_price_MLP14_weights.96-663.0834.keras', 'PRSA_data_vola7_MLP21_weights.94-0.0024.keras', 'PRSA_data_price_LSTM21_weights.03-43041.9570.keras', 'PRSA_data_price_MLP7_weights.12-3798.9531.keras', 'PRSA_data_price_LSTM28_weights.10-42813.0352.keras', 'PRSA_data_vola14_MLP28_weights.20-0.0014.keras', 'PRSA_data_vola7_MLP21_weights.04-0.0027.keras', 'PRSA_data_vola7_LSTM28_weights.18-0.0035.keras', 'PRSA_data_at_MLP7_weights.10-2.7632.keras', 'PRSA_data_at_LSTM21_weights.18-0.3561.keras', 'PRSA_data_price_LSTM_weights.06-67145.4141.keras', 'PRSA_data_price_LSTM28_weights.13-42783.9805.keras', 'PRSA_data_vola21_MLP7_weights.13-0.0012.keras', 'PRSA_data_price_LSTM28_weights.01-42902.4570.keras', 'PRSA_data_price_LSTM_weights.20-67005.9609.keras', 'PRSA_data_vola7_LSTM21_weights.04-0.0034.keras', 'PRSA_data_at_MLP14_weights.03-0.0461.keras', 'PRSA_data_at_LSTM21_weights.07-0.5113.keras', 'PRSA_data_vola28_MLP28_weights.01-0.0042.keras', 'PRSA_data_vola7_LSTM14_weights.01-0.0455.keras', 'PRSA_data_price_MLP28_weights.17-1182.9105.keras', 'PRSA_data_vola7_MLP14_weights.05-0.0018.keras', 'PRSA_data_vola28_MLP14_weights.38-0.0006.keras', 'PRSA_data_vola28_MLP7_weights.10-0.0013.keras', 'PRSA_data_vola7_LSTM21_weights.01-0.0038.keras', 'PRSA_data_price_LSTM14_weights.17-51286.4570.keras', 'PRSA_data_price_LSTM28_weights.16-42754.8008.keras', 'PRSA_data_price_LSTM14_weights.11-51345.4023.keras', 'PRSA_data_price_MLP14_weights.22-638.4409.keras', 'PRSA_data_vola7_MLP28_weights.32-0.0029.keras', 
'PRSA_data_price_MLP7_weights.48-1648.1864.keras', 'PRSA_data_vola28_LSTM28_weights.10-0.0007.keras', 'PRSA_data_vola21_LSTM_weights.03-0.0021.keras', 'PRSA_data_vola21_LSTM21_weights.13-0.0010.keras', 'PRSA_data_at_MLP7_weights.07-4.2627.keras', 'PRSA_data_vola7_LSTM21_weights.17-0.0029.keras', 'PRSA_data_vola21_LSTM28_weights.08-0.0011.keras', 'PRSA_data_price_MLP21_weights.05-1072.5081.keras', 'PRSA_data_at_LSTM21_weights.06-0.6219.keras', 'PRSA_data_vola7_LSTM21_weights.05-0.0030.keras', 'PRSA_data_price_MLP14_weights.29-856.8320.keras', 'PRSA_data_vola14_LSTM21_weights.11-0.0011.keras', 'PRSA_data_price_LSTM21_weights.10-42972.4219.keras', 'PRSA_data_at_LSTM21_weights.19-0.2744.keras', 'PRSA_data_price_LSTM_weights.02-67185.9453.keras', 'PRSA_data_vola28_MLP14_weights.21-0.0007.keras', 'PRSA_data_vola21_LSTM28_weights.16-0.0009.keras', 'PRSA_data_vola28_LSTM21_weights.10-0.0009.keras', 'PRSA_data_price_LSTM28_weights.15-42764.5898.keras', 'PRSA_data_at_LSTM14_weights.07-0.7059.keras', 'PRSA_data_vola21_LSTM21_weights.01-0.0048.keras', 'PRSA_data_at_LSTM14_weights.03-1.3422.keras', 'PRSA_data_vola14_LSTM14_weights.02-0.0015.keras', 'PRSA_data_vola14_MLP14_weights.02-0.0007.keras', 'PRSA_data_vola7_LSTM21_weights.02-0.0034.keras', 'PRSA_data_vola7_MLP21_weights.21-0.0025.keras', 'PRSA_data_at_MLP7_weights.18-1.8396.keras', 'PRSA_data_price_LSTM_weights.11-67095.6172.keras', 'PRSA_data_at_LSTM14_weights.12-0.6378.keras', 'PRSA_data_vola21_LSTM21_weights.07-0.0012.keras', 'PRSA_data_vola7_MLP28_weights.45-0.0030.keras', 'PRSA_data_at_MLP7_weights.26-0.8897.keras', 'PRSA_data_vola21_MLP7_weights.01-0.0014.keras', 'PRSA_data_price_MLP14_weights.19-982.5739.keras', 'PRSA_data_vola14_LSTM_weights.16-0.0017.keras', 'PRSA_data_price_MLP21_weights.94-813.8954.keras', 'PRSA_data_vola7_LSTM21_weights.20-0.0029.keras', 'PRSA_data_at_LSTM_weights.03-1.4842.keras', 'PRSA_data_vola21_MLP28_weights.02-0.0015.keras', 'PRSA_data_price_LSTM21_weights.12-42952.9453.keras', 
'PRSA_data_price_MLP28_weights.17-1019.2816.keras', 'PRSA_data_vola14_LSTM_weights.13-0.0019.keras', 'PRSA_data_vola7_LSTM_weights.02-0.0038.keras', 'PRSA_data_vola7_LSTM28_weights.02-0.0042.keras', 'PRSA_data_at_LSTM28_weights.12-0.0705.keras', 'PRSA_data_at_LSTM14_weights.13-0.2270.keras', 'PRSA_data_price_LSTM_weights.18-67026.2891.keras', 'PRSA_data_vola28_MLP21_weights.66-0.0007.keras', 'PRSA_data_at_MLP14_weights.42-0.0373.keras', 'PRSA_data_vola7_MLP14_weights.38-0.0020.keras', 'PRSA_data_vola7_LSTM21_weights.11-0.0029.keras', 'PRSA_data_vola21_LSTM21_weights.12-0.0010.keras', 'PRSA_data_at_LSTM_weights.14-0.5158.keras', 'PRSA_data_vola14_LSTM21_weights.01-0.0019.keras', 'PRSA_data_vola28_LSTM21_weights.11-0.0009.keras', 'PRSA_data_vola7_LSTM28_weights.05-0.0038.keras', 'PRSA_data_vola14_MLP14_weights.10-0.0005.keras', 'PRSA_data_at_MLP21_weights.03-2.1540.keras', 'PRSA_data_at_LSTM_weights.16-0.5377.keras', 'PRSA_data_vola21_LSTM7_weights.13-0.0013.keras', 'PRSA_data_vola21_MLP28_weights.44-0.0008.keras', 'PRSA_data_at_MLP7_weights.16-1.9941.keras', 'PRSA_data_vola28_LSTM28_weights.01-0.0010.keras', 'PRSA_data_vola28_LSTM14_weights.19-0.0006.keras', 'PRSA_data_price_LSTM21_weights.14-42933.4180.keras', 'PRSA_data_vola14_LSTM28_weights.04-0.0019.keras', 'PRSA_data_price_MLP21_weights.82-828.7976.keras', 'PRSA_data_vola28_MLP21_weights.01-0.0030.keras', 'PRSA_data_price_LSTM14_weights.10-51355.2070.keras', 'PRSA_data_vola21_MLP21_weights.18-0.0010.keras', 'PRSA_data_vola28_MLP21_weights.73-0.0007.keras', 'PRSA_data_vola28_MLP7_weights.20-0.0011.keras', 'PRSA_data_price_LSTM21_weights.13-42943.1719.keras', 'PRSA_data_vola28_MLP28_weights.36-0.0007.keras', 'PRSA_data_vola7_MLP14_weights.04-0.0025.keras', 'PRSA_data_at_MLP14_weights.02-0.2715.keras', 'PRSA_data_price_LSTM14_weights.05-51404.8945.keras', 'PRSA_data_price_LSTM28_weights.19-42725.4297.keras', 'PRSA_data_price_MLP28_weights.01-1561.6656.keras', 'PRSA_data_vola21_MLP7_weights.14-0.0011.keras', 
'PRSA_data_vola7_MLP7_weights.23-0.0033.keras', 'PRSA_data_vola14_MLP28_weights.01-0.0026.keras', 'PRSA_data_at_MLP14_weights.29-0.0323.keras', 'PRSA_data_vola7_MLP21_weights.01-0.0033.keras', 'PRSA_data_vola21_LSTM_weights.19-0.0014.keras', 'PRSA_data_price_MLP7_weights.03-5953.8076.keras', 'PRSA_data_price_MLP21_weights.42-948.6628.keras', 'PRSA_data_vola21_LSTM14_weights.07-0.0008.keras', 'PRSA_data_vola14_LSTM28_weights.05-0.0018.keras', 'PRSA_data_vola7_LSTM28_weights.13-0.0037.keras', 'PRSA_data_vola21_MLP21_weights.01-0.0016.keras', 'PRSA_data_vola14_LSTM21_weights.08-0.0011.keras', 'PRSA_data_price_LSTM21_weights.06-43011.9570.keras', 'PRSA_data_vola7_MLP14_weights.05-0.0019.keras', 'PRSA_data_vola7_MLP28_weights.16-0.0033.keras', 'PRSA_data_vola21_LSTM28_weights.16-0.0008.keras', 'PRSA_data_vola28_MLP14_weights.01-0.0031.keras', 'PRSA_data_vola14_MLP28_weights.11-0.0015.keras', 'PRSA_data_price_LSTM14_weights.03-51425.0000.keras', 'PRSA_data_vola7_LSTM28_weights.04-0.0039.keras', 'PRSA_data_vola28_LSTM7_weights.13-0.0011.keras', 'PRSA_data_price_LSTM14_weights.01-51445.4883.keras', 'PRSA_data_vola28_MLP28_weights.22-0.0006.keras', 'PRSA_data_price_LSTM_weights.09-67115.3359.keras', 'PRSA_data_vola28_LSTM7_weights.14-0.0011.keras', 'PRSA_data_vola7_LSTM14_weights.04-0.0050.keras', 'PRSA_data_vola21_MLP28_weights.17-0.0010.keras', 'PRSA_data_vola28_LSTM21_weights.09-0.0010.keras', 'PRSA_data_vola21_MLP14_weights.13-0.0010.keras', 'PRSA_data_vola14_MLP14_weights.01-0.0013.keras', 'PRSA_data_vola21_MLP28_weights.50-0.0009.keras', 'PRSA_data_at_LSTM_weights.19-0.2184.keras', 'PRSA_data_vola14_LSTM_weights.02-0.0030.keras', 'PRSA_data_vola7_MLP28_weights.39-0.0030.keras', 'PRSA_data_price_LSTM21_weights.04-43031.9414.keras', 'PRSA_data_price_LSTM_weights.15-67056.2109.keras', 'PRSA_data_at_MLP7_weights.20-1.6814.keras', 'PRSA_data_vola28_LSTM7_weights.18-0.0010.keras', 'PRSA_data_at_MLP7_weights.95-0.2130.keras', 
'PRSA_data_vola14_LSTM28_weights.03-0.0021.keras', 'PRSA_data_at_LSTM_weights.01-7.5653.keras', 'PRSA_data_at_MLP7_weights.05-7.2110.keras', 'PRSA_data_vola21_LSTM28_weights.20-0.0008.keras', 'PRSA_data_at_LSTM_weights.07-0.8569.keras', 'PRSA_data_vola14_LSTM28_weights.12-0.0014.keras', 'PRSA_data_price_LSTM21_weights.16-42913.7969.keras', 'PRSA_data_at_MLP28_weights.04-1.3772.keras', 'PRSA_data_price_LSTM_weights.08-67125.3047.keras', 'PRSA_data_price_LSTM14_weights.07-51384.8633.keras', 'PRSA_data_price_LSTM_weights.04-67165.6016.keras', 'PRSA_data_vola14_LSTM28_weights.15-0.0014.keras', 'PRSA_data_at_MLP14_weights.01-0.4195.keras', 'PRSA_data_price_LSTM28_weights.09-42822.7578.keras', 'PRSA_data_price_LSTM28_weights.03-42882.1953.keras', 'PRSA_data_at_LSTM14_weights.05-0.8408.keras', 'PRSA_data_vola7_MLP28_weights.08-0.0030.keras', 'PRSA_data_price_MLP14_weights.07-3004.8474.keras', 'PRSA_data_price_LSTM21_weights.15-42923.6367.keras', 'PRSA_data_vola7_MLP21_weights.81-0.0025.keras', 'PRSA_data_price_MLP21_weights.86-819.7106.keras', 'PRSA_data_vola28_MLP7_weights.10-0.0010.keras', 'PRSA_data_vola21_MLP28_weights.05-0.0012.keras', 'PRSA_data_vola21_MLP14_weights.20-0.0007.keras', 'PRSA_data_vola14_MLP28_weights.42-0.0011.keras', 'PRSA_data_price_LSTM_weights.12-67085.7578.keras', 'PRSA_data_vola28_MLP21_weights.13-0.0008.keras', 'PRSA_data_vola14_MLP28_weights.08-0.0016.keras', 'PRSA_data_at_LSTM28_weights.05-0.5407.keras', 'PRSA_data_at_MLP7_weights.86-0.0883.keras', 'PRSA_data_vola28_LSTM14_weights.01-0.0020.keras', 'PRSA_data_vola21_MLP21_weights.71-0.0010.keras', 'PRSA_data_at_MLP28_weights.09-0.1537.keras', 'PRSA_data_vola7_MLP28_weights.48-0.0030.keras', 'PRSA_data_at_MLP28_weights.12-0.0354.keras', 'PRSA_data_at_LSTM14_weights.04-0.9732.keras', 'PRSA_data_at_LSTM14_weights.15-0.2154.keras', 'PRSA_data_vola14_MLP28_weights.21-0.0013.keras', 'PRSA_data_vola21_MLP14_weights.25-0.0009.keras', 'PRSA_data_vola14_LSTM14_weights.01-0.0027.keras', 
'PRSA_data_at_LSTM21_weights.20-0.0855.keras', 'PRSA_data_vola21_LSTM28_weights.01-0.0039.keras', 'PRSA_data_price_MLP14_weights.02-8059.2510.keras', 'PRSA_data_vola21_MLP7_weights.13-0.0013.keras', 'PRSA_data_vola28_MLP7_weights.04-0.0016.keras', 'PRSA_data_vola21_LSTM21_weights.02-0.0043.keras', 'PRSA_data_price_MLP14_weights.78-683.9230.keras', 'PRSA_data_price_MLP21_weights.19-1002.5925.keras', 'PRSA_data_vola28_MLP21_weights.92-0.0007.keras', 'PRSA_data_price_LSTM28_weights.14-42774.2500.keras', 'PRSA_data_vola21_MLP21_weights.06-0.0010.keras', 'PRSA_data_vola14_LSTM14_weights.03-0.0010.keras', 'PRSA_data_price_LSTM_weights.16-67046.2422.keras', 'PRSA_data_vola7_LSTM28_weights.07-0.0037.keras', 'PRSA_data_vola28_LSTM7_weights.01-0.0017.keras', 'PRSA_data_vola28_LSTM14_weights.02-0.0008.keras', 'PRSA_data_vola7_MLP14_weights.05-0.0022.keras', 'PRSA_data_at_MLP21_weights.03-0.2039.keras', 'PRSA_data_vola14_LSTM28_weights.02-0.0023.keras', 'PRSA_data_at_MLP7_weights.03-13.2212.keras', 'PRSA_data_vola7_LSTM14_weights.04-0.0037.keras', 'PRSA_data_vola14_MLP21_weights.69-0.0010.keras', 'PRSA_data_vola21_LSTM7_weights.18-0.0013.keras', 'PRSA_data_vola7_MLP21_weights.49-0.0025.keras', 'PRSA_data_vola28_MLP28_weights.31-0.0006.keras', 'PRSA_data_price_MLP21_weights.77-841.5739.keras', 'PRSA_data_vola28_MLP14_weights.35-0.0006.keras', 'PRSA_data_vola7_MLP28_weights.26-0.0031.keras', 'PRSA_data_vola7_LSTM14_weights.01-0.0065.keras', 'PRSA_data_price_LSTM14_weights.02-51435.1133.keras', 'PRSA_data_price_LSTM28_weights.20-42714.7695.keras', 'PRSA_data_vola28_LSTM28_weights.16-0.0006.keras', 'PRSA_data_vola14_LSTM28_weights.06-0.0016.keras', 'PRSA_data_vola21_MLP21_weights.13-0.0010.keras', 'PRSA_data_vola28_MLP14_weights.32-0.0006.keras', 'PRSA_data_vola7_MLP28_weights.01-0.0043.keras', 'PRSA_data_vola21_MLP28_weights.08-0.0011.keras', 'PRSA_data_vola21_LSTM28_weights.10-0.0011.keras', 'PRSA_data_vola21_LSTM7_weights.14-0.0012.keras', 
'PRSA_data_vola7_LSTM28_weights.01-0.0049.keras', 'PRSA_data_price_LSTM28_weights.20-42715.6250.keras', 'PRSA_data_vola21_LSTM7_weights.13-0.0012.keras', 'PRSA_data_price_LSTM_weights.05-67155.5078.keras', 'PRSA_data_price_MLP14_weights.14-1073.6493.keras', 'PRSA_data_vola14_LSTM_weights.03-0.0029.keras', 'PRSA_data_vola14_MLP7_weights.07-0.0021.keras', 'PRSA_data_at_MLP28_weights.03-0.0637.keras', 'PRSA_data_vola21_MLP28_weights.03-0.0012.keras', 'PRSA_data_at_MLP28_weights.05-0.1823.keras', 'PRSA_data_at_MLP21_weights.01-0.9762.keras', 'PRSA_data_vola7_LSTM28_weights.15-0.0035.keras', 'PRSA_data_price_LSTM21_weights.08-42992.0703.keras', 'PRSA_data_vola21_MLP21_weights.22-0.0010.keras', 'PRSA_data_at_LSTM28_weights.01-6.0769.keras', 'PRSA_data_vola28_LSTM28_weights.20-0.0006.keras', 'PRSA_data_at_MLP21_weights.01-4.3241.keras', 'PRSA_data_at_LSTM_weights.06-0.8671.keras', 'PRSA_data_price_MLP7_weights.37-1654.4709.keras', 'PRSA_data_at_LSTM_weights.20-0.4626.keras', 'PRSA_data_vola21_MLP28_weights.27-0.0009.keras', 'PRSA_data_at_LSTM_weights.19-0.4729.keras', 'PRSA_data_at_MLP7_weights.24-1.4718.keras', 'PRSA_data_at_MLP7_weights.59-0.4636.keras', 'PRSA_data_vola14_LSTM14_weights.06-0.0005.keras', 'PRSA_data_at_MLP7_weights.14-2.2322.keras', 'PRSA_data_vola7_LSTM14_weights.02-0.0196.keras', 'PRSA_data_at_LSTM14_weights.19-0.3356.keras', 'PRSA_data_price_LSTM28_weights.18-42735.2305.keras', 'PRSA_data_at_LSTM21_weights.04-0.8083.keras', 'PRSA_data_price_LSTM14_weights.12-51335.6523.keras', 'PRSA_data_vola21_LSTM_weights.06-0.0017.keras', 'PRSA_data_price_LSTM28_weights.05-42862.2383.keras', 'PRSA_data_price_MLP7_weights.10-1248.9592.keras', 'PRSA_data_vola21_MLP28_weights.35-0.0009.keras', 'PRSA_data_vola28_LSTM21_weights.03-0.0013.keras', 'PRSA_data_at_LSTM21_weights.03-1.0323.keras', 'PRSA_data_price_MLP14_weights.04-5028.7607.keras', 'PRSA_data_price_LSTM14_weights.08-51374.8945.keras', 'PRSA_data_vola28_MLP14_weights.48-0.0007.keras']
# Generate predictions with the restored best LSTM model; skip when loading failed.
if best28_lstm_at is None:
    print("No se puede realizar predicciones porque el mejor modelo no se ha cargado.")
else:
    def _flat_preds(features):
        # Predict and drop any singleton dimensions from the output.
        return np.squeeze(best28_lstm_at.predict(features))

    train_preds_at_LSTM28 = _flat_preds(X_train_at_lstm_28)
    val_preds_at_LSTM28 = _flat_preds(X_val_at_lstm_28)
    test_preds_at_LSTM28 = _flat_preds(X_test_at_lstm_28)

    # Show the flattened predictions for each split.
    print("Predicciones de Entrenamiento:", train_preds_at_LSTM28)
    print("Predicciones de validación:", val_preds_at_LSTM28)
    print("Predicciones de prueba:", test_preds_at_LSTM28)
153/153 [==============================] - 1s 3ms/step
1/1 [==============================] - 0s 9ms/step
1/1 [==============================] - 0s 9ms/step
Predicciones de Entrenamiento: [-0.16464579 -0.16464579 -0.16464579 ... 23.157084 23.155376
23.153486 ]
Predicciones de validación: [23.152178 23.154238 23.1588 23.160543 23.162085 23.163881 23.165194
23.165972 23.170765 23.176058 23.18073 23.184172 23.182032 23.177324
23.171373 23.166952 23.165258 23.164436 23.16165 23.158604 23.156282
23.154453 23.14985 23.144815 23.141035 23.138159 23.139002 23.14208 ]
Predicciones de prueba: [23.145075 23.149141 23.152662 23.154593 23.156164 23.157711 23.158747
23.15876 23.158428 23.158815 23.161383 23.16601 23.173231 23.181253
23.188791 23.196873 23.203922 23.211584 23.218903 23.22509 23.229498
23.233057 23.235762 23.238268 23.240198 23.241152 23.24106 23.241545]
plot_model(data_train_plot_at28[-100:], data_val_plot_at28, data_test_plot_at28, val_preds_at_LSTM28, test_preds_at_LSTM28, "Predicciones usando Memoria a Corto y Largo Paso (LSTM)")
A continuación realizamos un análisis sobre los residuales y rendimiento de nuestro modelo con relación a nuestro conjunto de Entrenamiento(rendimiento) y test (rendimiento y residuales).
Conjunto de datos de Entrenamiento
A continuación se realiza el análisis de los residuales para el conjunto de entrenamiento.
ljung_box_pval_LSTM_train28_at, jarque_bera_pval_LSTM_train28_at = diagnostic_plots(y_train_at28, train_preds_at_LSTM28)
Ljung-Box LB Statistic: 2276.618134
Ljung-Box p-value: 0.000000
Se rechaza H0: hay autocorrelación en los residuales.
Jarque-Bera p-value: 0.000000
Se rechaza H0: los residuales no siguen una distribución normal.
# Compute the fit metrics (SSE, MAPE, MAD, MSD, R2) on the training split.
metrica_at_LSTM_train28 = metricas(y_train_at28,train_preds_at_LSTM28)
# Relabel the single result row with a descriptive model name.
metrica_at_LSTM_train28.index = metrica_at_LSTM_train28.index.map({0: 'LSTM Entrenamiento Retorno Acumulado τ = 28'})
# Append the residual-diagnostic p-values as extra columns.
metrica_at_LSTM_train28['Ljung-Box p-value'] = pd.Series([ljung_box_pval_LSTM_train28_at], index=metrica_at_LSTM_train28.index)
metrica_at_LSTM_train28['Jarque-Bera p-value'] = pd.Series([jarque_bera_pval_LSTM_train28_at], index=metrica_at_LSTM_train28.index)
# Display the metrics table.
metrica_at_LSTM_train28
| SSE | MAPE | MAD | MSD | R2 | Ljung-Box p-value | Jarque-Bera p-value | |
|---|---|---|---|---|---|---|---|
| LSTM Entrenamiento Retorno Acumulado τ = 28 | 93.0869 | 0.97% | 0.1 | 0.02 | 99.95% | 0.0 | 0.0 |
Conjunto de datos de Prueba:
ljung_box_pvalLSTM28_at, jarque_bera_pvalLSTM28_at = evaluate_residuals(data_test_plot_at28, test_preds_at_LSTM28)
Se rechaza H0: hay autocorrelación en los residuales.
No se rechaza H0: los residuales siguen una distribución normal.
# Compute the fit metrics (SSE, MAPE, MAD, MSD, R2) on the test split.
metrica_LSTM_test28_at = metricas(y_test_at28,test_preds_at_LSTM28)
# Relabel the single result row with a descriptive model name.
metrica_LSTM_test28_at.index = metrica_LSTM_test28_at.index.map({0: 'LSTM Prueba Retorno Acumulado τ = 28'})
# Append the residual-diagnostic p-values as extra columns.
metrica_LSTM_test28_at['Ljung-Box p-value'] = pd.Series([ljung_box_pvalLSTM28_at], index=metrica_LSTM_test28_at.index)
metrica_LSTM_test28_at['Jarque-Bera p-value'] = pd.Series([jarque_bera_pvalLSTM28_at], index=metrica_LSTM_test28_at.index)
# Display the metrics table.
metrica_LSTM_test28_at
| SSE | MAPE | MAD | MSD | R2 | Ljung-Box p-value | Jarque-Bera p-value | |
|---|---|---|---|---|---|---|---|
| LSTM Prueba Retorno Acumulado τ = 28 | 0.6838 | 0.63% | 0.15 | 0.02 | -244.58% | 0.0004 | 0.6241 |
Curva Runs vs Error/Score :
plot_best_model_validation_loss(history_at_LSTM28)
Boxplot Errores Conjunto de Entrenamiento, Validación y Prueba :
errores_plots(y_train_at28, train_preds_at_LSTM28, y_val_at28, val_preds_at_LSTM28, y_test_at28, test_preds_at_LSTM28)
De acuerdo a los resultados obtenidos del gráfico de Run vs Error/Score y al compararlo con el gráfico boxplot de los residuales se observa lo siguiente:
Conjunto de Entrenamiento: La media de los residuales se encuentra considerablemente alejada del error/score correspondiente al val_loss de la época del mejor modelo.
Conjunto de Validación: La media de los residuales se encuentra muy cercana al error/score correspondiente al val_loss de la época del mejor modelo.
Conjunto de Prueba: La media de los residuales se encuentra alejada del error/score correspondiente al val_loss de la época del mejor modelo.
Lo anterior es coherente con las métricas obtenidas para el modelo implementado.
Volatilidad ω = 7 (Volatilidad_7): Perceptrones Multicapa (MLP)#
Horizonte de 7 días (\(\tau=7\))#
A continuación, se lleva a cabo la búsqueda de los hiperparámetros que optimizan nuestro modelo utilizando Keras y GridSearch.
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'  # Silence TensorFlow info/warning logs.
def create_mlp_model(activation='tanh', learning_rate=0.001):
    """Build and compile the MLP for the ω=7 volatility series, τ=7 horizon.

    Architecture: 7-dim input → Dense(32) → Dense(16) → Dense(16)
    → Dropout(0.2) → Dense(1, linear). Compiled with MAE loss and
    the legacy Adam optimizer at the given learning rate.
    """
    inputs = Input(shape=(7,), dtype='float32')
    hidden = Dense(32, activation=activation)(inputs)
    hidden = Dense(16, activation=activation)(hidden)
    hidden = Dense(16, activation=activation)(hidden)
    hidden = Dropout(0.2)(hidden)  # regularization before the output layer
    outputs = Dense(1, activation='linear')(hidden)
    mlp = Model(inputs=inputs, outputs=outputs)
    mlp.compile(loss='mean_absolute_error',
                optimizer=tf.keras.optimizers.legacy.Adam(learning_rate=learning_rate))
    return mlp
# Grid-search configuration: wrap the Keras builder in a sklearn-compatible
# regressor and search over activation / epochs / learning rate. Only one
# candidate is active here; the comments show the full spaces explored.
model = KerasRegressor(build_fn=create_mlp_model, epochs=20, batch_size=16, verbose=0)
param_grid = {
    'activation': ['relu'],    # full space: ['relu', 'tanh', 'sigmoid']
    'epochs': [50],            # full space: [20, 50, 100, 200, 300]
    'learning_rate': [0.001],  # full space: [0.001, 0.01, 0.1, 0.2, 0.3]
}
grid = GridSearchCV(estimator=model, param_grid=param_grid, n_jobs=-1,
                    cv=KFold(n_splits=5, shuffle=True), verbose=2)
# Run the grid search on the ω=7 / τ=7 training split and report the winner.
grid_result = grid.fit(X_train_vola7, y_train_vola7)
best = grid_result.best_params_
print(f"Mejor función de activación: {best['activation']}")
print(f"Mejor número de epocas: {best['epochs']}")
print(f"Mejor Longitud de paso μ (Learning Rate): {best['learning_rate']}")
print(f"Mejor Puntuación: {grid_result.best_score_}")
Fitting 5 folds for each of 1 candidates, totalling 5 fits
[CV] END ....activation=relu, epochs=50, learning_rate=0.001; total time= 6.6s
[CV] END ....activation=relu, epochs=50, learning_rate=0.001; total time= 6.7s
[CV] END ....activation=relu, epochs=50, learning_rate=0.001; total time= 6.8s
[CV] END ....activation=relu, epochs=50, learning_rate=0.001; total time= 7.0s
[CV] END ....activation=relu, epochs=50, learning_rate=0.001; total time= 10.6s
Mejor función de activación: relu
Mejor número de epocas: 50
Mejor Longitud de paso μ (Learning Rate): 0.001
Mejor Puntuación: -0.00720440624281764
Indexación de parámetros de la función build_models con base en lo indicado en el numeral 3 del parcial práctico.
# Hyper-parameter grid for build_models_mlp (exercise item 3):
# 7-dim input, candidate hidden sizes, candidate dropout rates, ReLU activation.
input_shape7 = 7
neurons_list = [10, 100, 1000, 10000]
dropout_rates = [0.2, 0.4, 0.6, 0.8]
models_MLP7_vola7 = build_models_mlp(input_shape7, neurons_list, dropout_rates, 'relu')
Modelo con 10 neuronas y dropout 0.2:
Model: "model_273"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_274 (InputLayer) [(None, 7)] 0
dense_684 (Dense) (None, 32) 256
dense_685 (Dense) (None, 16) 528
dense_686 (Dense) (None, 16) 272
dropout_273 (Dropout) (None, 16) 0
dense_687 (Dense) (None, 1) 17
=================================================================
Total params: 1,073
Trainable params: 1,073
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10 neuronas y dropout 0.4:
Model: "model_274"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_275 (InputLayer) [(None, 7)] 0
dense_688 (Dense) (None, 32) 256
dense_689 (Dense) (None, 16) 528
dense_690 (Dense) (None, 16) 272
dropout_274 (Dropout) (None, 16) 0
dense_691 (Dense) (None, 1) 17
=================================================================
Total params: 1,073
Trainable params: 1,073
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10 neuronas y dropout 0.6:
Model: "model_275"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_276 (InputLayer) [(None, 7)] 0
dense_692 (Dense) (None, 32) 256
dense_693 (Dense) (None, 16) 528
dense_694 (Dense) (None, 16) 272
dropout_275 (Dropout) (None, 16) 0
dense_695 (Dense) (None, 1) 17
=================================================================
Total params: 1,073
Trainable params: 1,073
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10 neuronas y dropout 0.8:
Model: "model_276"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_277 (InputLayer) [(None, 7)] 0
dense_696 (Dense) (None, 32) 256
dense_697 (Dense) (None, 16) 528
dense_698 (Dense) (None, 16) 272
dropout_276 (Dropout) (None, 16) 0
dense_699 (Dense) (None, 1) 17
=================================================================
Total params: 1,073
Trainable params: 1,073
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 100 neuronas y dropout 0.2:
Model: "model_277"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_278 (InputLayer) [(None, 7)] 0
dense_700 (Dense) (None, 32) 256
dense_701 (Dense) (None, 16) 528
dense_702 (Dense) (None, 16) 272
dropout_277 (Dropout) (None, 16) 0
dense_703 (Dense) (None, 1) 17
=================================================================
Total params: 1,073
Trainable params: 1,073
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 100 neuronas y dropout 0.4:
Model: "model_278"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_279 (InputLayer) [(None, 7)] 0
dense_704 (Dense) (None, 32) 256
dense_705 (Dense) (None, 16) 528
dense_706 (Dense) (None, 16) 272
dropout_278 (Dropout) (None, 16) 0
dense_707 (Dense) (None, 1) 17
=================================================================
Total params: 1,073
Trainable params: 1,073
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 100 neuronas y dropout 0.6:
Model: "model_279"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_280 (InputLayer) [(None, 7)] 0
dense_708 (Dense) (None, 32) 256
dense_709 (Dense) (None, 16) 528
dense_710 (Dense) (None, 16) 272
dropout_279 (Dropout) (None, 16) 0
dense_711 (Dense) (None, 1) 17
=================================================================
Total params: 1,073
Trainable params: 1,073
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 100 neuronas y dropout 0.8:
Model: "model_280"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_281 (InputLayer) [(None, 7)] 0
dense_712 (Dense) (None, 32) 256
dense_713 (Dense) (None, 16) 528
dense_714 (Dense) (None, 16) 272
dropout_280 (Dropout) (None, 16) 0
dense_715 (Dense) (None, 1) 17
=================================================================
Total params: 1,073
Trainable params: 1,073
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 1000 neuronas y dropout 0.2:
Model: "model_281"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_282 (InputLayer) [(None, 7)] 0
dense_716 (Dense) (None, 32) 256
dense_717 (Dense) (None, 16) 528
dense_718 (Dense) (None, 16) 272
dropout_281 (Dropout) (None, 16) 0
dense_719 (Dense) (None, 1) 17
=================================================================
Total params: 1,073
Trainable params: 1,073
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 1000 neuronas y dropout 0.4:
Model: "model_282"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_283 (InputLayer) [(None, 7)] 0
dense_720 (Dense) (None, 32) 256
dense_721 (Dense) (None, 16) 528
dense_722 (Dense) (None, 16) 272
dropout_282 (Dropout) (None, 16) 0
dense_723 (Dense) (None, 1) 17
=================================================================
Total params: 1,073
Trainable params: 1,073
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 1000 neuronas y dropout 0.6:
Model: "model_283"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_284 (InputLayer) [(None, 7)] 0
dense_724 (Dense) (None, 32) 256
dense_725 (Dense) (None, 16) 528
dense_726 (Dense) (None, 16) 272
dropout_283 (Dropout) (None, 16) 0
dense_727 (Dense) (None, 1) 17
=================================================================
Total params: 1,073
Trainable params: 1,073
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 1000 neuronas y dropout 0.8:
Model: "model_284"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_285 (InputLayer) [(None, 7)] 0
dense_728 (Dense) (None, 32) 256
dense_729 (Dense) (None, 16) 528
dense_730 (Dense) (None, 16) 272
dropout_284 (Dropout) (None, 16) 0
dense_731 (Dense) (None, 1) 17
=================================================================
Total params: 1,073
Trainable params: 1,073
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10000 neuronas y dropout 0.2:
Model: "model_285"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_286 (InputLayer) [(None, 7)] 0
dense_732 (Dense) (None, 32) 256
dense_733 (Dense) (None, 16) 528
dense_734 (Dense) (None, 16) 272
dropout_285 (Dropout) (None, 16) 0
dense_735 (Dense) (None, 1) 17
=================================================================
Total params: 1,073
Trainable params: 1,073
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10000 neuronas y dropout 0.4:
Model: "model_286"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_287 (InputLayer) [(None, 7)] 0
dense_736 (Dense) (None, 32) 256
dense_737 (Dense) (None, 16) 528
dense_738 (Dense) (None, 16) 272
dropout_286 (Dropout) (None, 16) 0
dense_739 (Dense) (None, 1) 17
=================================================================
Total params: 1,073
Trainable params: 1,073
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10000 neuronas y dropout 0.6:
Model: "model_287"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_288 (InputLayer) [(None, 7)] 0
dense_740 (Dense) (None, 32) 256
dense_741 (Dense) (None, 16) 528
dense_742 (Dense) (None, 16) 272
dropout_287 (Dropout) (None, 16) 0
dense_743 (Dense) (None, 1) 17
=================================================================
Total params: 1,073
Trainable params: 1,073
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10000 neuronas y dropout 0.8:
Model: "model_288"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_289 (InputLayer) [(None, 7)] 0
dense_744 (Dense) (None, 32) 256
dense_745 (Dense) (None, 16) 528
dense_746 (Dense) (None, 16) 272
dropout_288 (Dropout) (None, 16) 0
dense_747 (Dense) (None, 1) 17
=================================================================
Total params: 1,073
Trainable params: 1,073
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Para entrenar el modelo, se utiliza la función fit() en el objeto del modelo, proporcionándole como argumentos X_train y y_train. El proceso de entrenamiento se lleva a cabo a lo largo de un número específico de épocas. Además, el parámetro batch_size especifica cuántas muestras del conjunto de entrenamiento se usarán en cada iteración de retropropagación (backpropagation).
El conjunto de datos de validación se utiliza para evaluar el rendimiento del modelo al finalizar cada época. Se emplea un objeto ModelCheckpoint que monitorea la función de pérdida en el conjunto de validación y almacena el modelo correspondiente a la época en la que esta función alcanza su valor más bajo.
Se define val_loss como el valor de la función de coste para los datos de validación cruzada y loss como el valor de la función de coste para los datos de entrenamiento. period=1 indica que el modelo se evaluará y potencialmente se guardará después de cada época.
from tensorflow.keras.callbacks import ModelCheckpoint

# Checkpoint callback: evaluated after every epoch, it keeps a model file
# only when the validation loss improves on the best value seen so far.
# Epoch number and val_loss are encoded into the file name.
save_weights = os.path.join('keras_models',
                            'PRSA_data_vola7_MLP7_weights.{epoch:02d}-{val_loss:.4f}.keras')
save_best7_vola7 = ModelCheckpoint(
    save_weights,
    monitor='val_loss',
    mode='min',
    save_best_only=True,
    save_weights_only=False,
    save_freq='epoch',
    verbose=2,
)
A continuación se itera sobre cada uno de los modelos del objeto models_MLP7.
import os
from joblib import dump, load

history_vola7_MPL7 = []
# Train each candidate MLP, or reuse a cached training history so the
# expensive fit only runs once per model.
for i, model in enumerate(models_MLP7_vola7):
    filename = f'history_vola7_MPL7_model_{i}.joblib'
    if os.path.exists(filename):
        model_history = load(filename)
        # BUGFIX: the message printed the literal text "(unknown)" instead of
        # interpolating the file name (see the captured output below, which
        # shows the real file names — the original f-string placeholder was lost).
        print(f"El archivo '{filename}' ya existe. Se ha cargado el historial del entrenamiento.")
    else:
        model_history = model.fit(x=X_train_vola7, y=y_train_vola7, batch_size=16, epochs=50,
                                  verbose=2, callbacks=[save_best7_vola7],
                                  validation_data=(X_val_vola7, y_val_vola7),
                                  shuffle=True)
        dump(model_history.history, filename)
        # BUGFIX: same lost placeholder restored here.
        print(f"El entrenamiento del modelo {i + 1} se ha completado y el historial ha sido guardado en '{filename}'.")
    # Cached histories are plain dicts; a fresh fit returns a History object.
    history_vola7_MPL7.append(model_history if isinstance(model_history, dict) else model_history.history)
El archivo 'history_vola7_MPL7_model_0.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola7_MPL7_model_1.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola7_MPL7_model_2.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola7_MPL7_model_3.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola7_MPL7_model_4.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola7_MPL7_model_5.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola7_MPL7_model_6.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola7_MPL7_model_7.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola7_MPL7_model_8.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola7_MPL7_model_9.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola7_MPL7_model_10.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola7_MPL7_model_11.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola7_MPL7_model_12.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola7_MPL7_model_13.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola7_MPL7_model_14.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola7_MPL7_model_15.joblib' ya existe. Se ha cargado el historial del entrenamiento.
En este caso, el parámetro verbose=2 indica que se mostrará una línea por cada época durante el entrenamiento. Los valores posibles son: 0 para no mostrar información, 1 para visualizar una barra de progreso y 2 para ver un registro por época.
Además, las muestras se mezclan de manera aleatoria antes de cada época, gracias a la opción shuffle=True.
Una vez completado el entrenamiento, se realizan predicciones sobre el Precio del Bitcoin utilizando los mejores modelos guardados. Las predicciones generadas, que corresponden al Precio del Bitcoin escalado, son transformadas inversamente para recuperar los valores originales. Asimismo, se evalúa la calidad del ajuste mediante el uso de métricas de medición como SSE, MAPE, MAD, MSD y R2.
import os
import re
from tensorflow.keras.models import load_model
import numpy as np

# Scan the checkpoint directory and load the checkpoint with the lowest
# validation loss; ModelCheckpoint encodes epoch and val_loss in the name.
model_dir = 'keras_models'
files = os.listdir(model_dir)
pattern = r"PRSA_data_vola7_MLP7_weights\.(\d+)-([\d\.]+)\.keras"
best_val_loss = float('inf')
best_model_file = None
best_model7_vola7 = None
# Iterate over the directory entries, tracking the minimum encoded val_loss.
for file in files:
    match = re.match(pattern, file)
    if match:
        # group(1) is the epoch number; only the encoded val_loss (group 2)
        # matters for selecting the best checkpoint, so the epoch is not read.
        val_loss = float(match.group(2))
        if val_loss < best_val_loss:
            best_val_loss = val_loss
            best_model_file = file  # remember the best checkpoint so far
# Load the winning checkpoint, if any file matched the pattern.
if best_model_file:
    best_model_path = os.path.join(model_dir, best_model_file)
    print(f"Cargando el mejor modelo: {best_model_file} con val_loss: {best_val_loss}")
    best_model7_vola7 = load_model(best_model_path)
    if best_model7_vola7 is None:
        print("Error: No se pudo cargar el mejor modelo.")
else:
    print("No se encontraron archivos de modelos que coincidan con el patrón.")
Cargando el mejor modelo: PRSA_data_vola7_MLP7_weights.23-0.0033.keras con val_loss: 0.0033
A continuación estimamos las predicciones del modelo MLP para la época en donde la función de pérdida alcanza su valor más bajo.
# Predict with the best checkpointed model on the three splits; squeeze the
# (n, 1) network output down to 1-D before printing.
if best_model7_vola7 is None:
    print("No se puede realizar predicciones porque el mejor modelo no se ha cargado.")
else:
    train_preds_vola7_MLP7 = np.squeeze(best_model7_vola7.predict(X_train_vola7))
    val_preds_vola7_MLP7 = np.squeeze(best_model7_vola7.predict(X_val_vola7))
    test_preds_vola7_MLP7 = np.squeeze(best_model7_vola7.predict(X_test_vola7))
    print("Predicciones de Entrenamiento:", train_preds_vola7_MLP7)
    print("Predicciones de validación:", val_preds_vola7_MLP7)
    print("Predicciones de prueba:", test_preds_vola7_MLP7)
156/156 [==============================] - 0s 191us/step
1/1 [==============================] - 0s 8ms/step
1/1 [==============================] - 0s 8ms/step
Predicciones de Entrenamiento: [0.00423898 0.00423898 0.00423898 ... 0.04334676 0.04111054 0.0431885 ]
Predicciones de validación: [0.04139544 0.04364643 0.05262981 0.04575547 0.04430497 0.04464185
0.04381208]
Predicciones de prueba: [0.04323046 0.03411325 0.01797005 0.01620544 0.02108258 0.02420781
0.03208127]
A continuación se indexan los parámetros sobre la función data_plot() para un horizonte de 7 días (\(\tau = 7\)).
# Split the ω=7 volatility series into train / validation / test windows (τ = 7).
data_train_plot_vola7, data_val_plot_vola7, data_test_plot_vola7 = data_plot(df_1_st['Volatilidad_7'], 7)
Datos de entrenamiento:
4998 0.0000
4997 0.0000
4996 0.0000
4995 0.0000
4994 0.0000
...
18 0.0463
17 0.0444
16 0.0444
15 0.0438
14 0.0438
Name: Volatilidad_7, Length: 4985, dtype: float647
Datos de validación:
13 0.0360
12 0.0188
11 0.0171
10 0.0223
9 0.0256
8 0.0349
7 0.0402
Name: Volatilidad_7, dtype: float647
Datos de prueba:
6 0.0347
5 0.0444
4 0.0607
3 0.0613
2 0.0613
1 0.0573
0 0.0577
Name: Volatilidad_7, dtype: float647
# Plot the last 100 training points plus validation/test targets and MLP predictions.
plot_model(data_train_plot_vola7[-100:], data_val_plot_vola7, data_test_plot_vola7, val_preds_vola7_MLP7, test_preds_vola7_MLP7, "Predicciones usando Perceptrón Multicapa (MLP)")
A continuación realizamos un análisis sobre los residuales y rendimiento de nuestro modelo con relación a nuestro conjunto de Entrenamiento(rendimiento) y test (rendimiento y residuales).
Conjunto de datos de Entrenamiento
A continuación se realiza el análisis de los residuales para el conjunto de entrenamiento.
# Residual diagnostics (Ljung-Box, Jarque-Bera) on the training split.
ljung_box_pval_MLP_train7_vola7, jarque_bera_pval_MLP_train7_vola7 = diagnostic_plots(y_train_vola7, train_preds_vola7_MLP7)
Ljung-Box LB Statistic: 29.815827
Ljung-Box p-value: 0.000000
Se rechaza H0: hay autocorrelación en los residuales.
Jarque-Bera p-value: 0.000000
Se rechaza H0: los residuales no siguen una distribución normal.
# Training-split metrics table, extended with the residual-diagnostic p-values.
_label = 'MLP Entrenamiento Volatilidad ω = 7 y τ = 7'
metrica_vola7_MLP_train = metricas(y_train_vola7, train_preds_vola7_MLP7)
metrica_vola7_MLP_train.index = metrica_vola7_MLP_train.index.map({0: _label})
metrica_vola7_MLP_train['Ljung-Box p-value'] = pd.Series(
    [ljung_box_pval_MLP_train7_vola7], index=metrica_vola7_MLP_train.index)
metrica_vola7_MLP_train['Jarque-Bera p-value'] = pd.Series(
    [jarque_bera_pval_MLP_train7_vola7], index=metrica_vola7_MLP_train.index)
metrica_vola7_MLP_train
| SSE | MAPE | MAD | MSD | R2 | Ljung-Box p-value | Jarque-Bera p-value | |
|---|---|---|---|---|---|---|---|
| MLP Entrenamiento Volatilidad ω = 7 y τ = 7 | 2.5823 | 17.98% | 0.01 | 0.0 | 87.67% | 4.7510e-08 | 0.0 |
Conjunto de datos de Prueba:
# Residual diagnostics (Ljung-Box, Jarque-Bera) on the test split.
ljung_box_pvalMLP7_vola7, jarque_bera_pvalMLP7_vola7 = evaluate_residuals(data_test_plot_vola7, test_preds_vola7_MLP7)
No se rechaza H0: los residuales son independientes (no correlacionados).
No se rechaza H0: los residuales siguen una distribución normal.
# Test-split metrics table, extended with the residual-diagnostic p-values.
_label = 'MLP Prueba Volatilidad ω = 7 y τ = 7'
metrica_MLP7_test_vola7 = metricas(y_test_vola7, test_preds_vola7_MLP7)
metrica_MLP7_test_vola7.index = metrica_MLP7_test_vola7.index.map({0: _label})
metrica_MLP7_test_vola7['Ljung-Box p-value'] = pd.Series(
    [ljung_box_pvalMLP7_vola7], index=metrica_MLP7_test_vola7.index)
metrica_MLP7_test_vola7['Jarque-Bera p-value'] = pd.Series(
    [jarque_bera_pvalMLP7_vola7], index=metrica_MLP7_test_vola7.index)
metrica_MLP7_test_vola7
| SSE | MAPE | MAD | MSD | R2 | Ljung-Box p-value | Jarque-Bera p-value | |
|---|---|---|---|---|---|---|---|
| MLP Prueba Volatilidad ω = 7 y τ = 7 | 0.0005 | 28.82% | 0.01 | 0.0 | -4.83% | 0.1964 | 0.6142 |
Curva Runs vs Error/Score :
# Epoch-vs-error/score curve for the ω=7 / τ=7 MLP training runs.
plot_best_model_validation_loss(history_vola7_MPL7)
Boxplot Errores Conjunto de Entrenamiento, Validación y Prueba :
# Residual boxplots for the train / validation / test splits.
# BUGFIX: the validation targets were taken from the cumulative-return series
# (y_val_at7) while every other argument belongs to the ω=7 volatility series;
# use y_val_vola7 (the targets used as validation_data during training).
errores_plots(y_train_vola7, train_preds_vola7_MLP7, y_val_vola7, val_preds_vola7_MLP7, y_test_vola7, test_preds_vola7_MLP7)
De acuerdo con los resultados obtenidos del gráfico de Run vs Error/Score y al compararlo con el gráfico boxplot de los residuales se observa lo siguiente:
Conjunto de Entrenamiento: La media de los residuales se encuentra muy cercana del error/score correspondiente al val_loss de la época del mejor modelo.
Conjunto de Validación: La media de los residuales se encuentra alejada considerablemente del error/score correspondiente al val_loss de la época del mejor modelo.
Conjunto de Prueba: La media de los residuales se encuentra muy cercana del error/score correspondiente al val_loss de la época del mejor modelo.
Lo anterior es coherente con las métricas obtenidas para el modelo implementado.
Horizonte de 14 días (\(\tau=14\))#
A continuación, se lleva a cabo la búsqueda de los hiperparámetros que optimizan nuestro modelo utilizando Keras y GridSearch.
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'  # Silence TensorFlow info/warning logs.
def create_mlp_model(activation='tanh', learning_rate=0.001):
    """Build and compile the MLP for the ω=7 volatility series, τ=14 horizon.

    Architecture: 14-dim input → Dense(32) → Dense(16) → Dense(16)
    → Dropout(0.2) → Dense(1, linear). Compiled with MAE loss and
    the legacy Adam optimizer at the given learning rate.
    """
    inputs = Input(shape=(14,), dtype='float32')
    hidden = Dense(32, activation=activation)(inputs)
    hidden = Dense(16, activation=activation)(hidden)
    hidden = Dense(16, activation=activation)(hidden)
    hidden = Dropout(0.2)(hidden)  # regularization before the output layer
    outputs = Dense(1, activation='linear')(hidden)
    mlp = Model(inputs=inputs, outputs=outputs)
    mlp.compile(loss='mean_absolute_error',
                optimizer=tf.keras.optimizers.legacy.Adam(learning_rate=learning_rate))
    return mlp
# Grid-search configuration for the τ=14 horizon: one active candidate per
# hyper-parameter; the comments show the full spaces explored.
model = KerasRegressor(build_fn=create_mlp_model, epochs=20, batch_size=16, verbose=0)
param_grid = {
    'activation': ['tanh'],    # full space: ['relu', 'tanh', 'sigmoid']
    'epochs': [50],            # full space: [20, 50, 100, 200, 300]
    'learning_rate': [0.001],  # full space: [0.001, 0.01, 0.1, 0.2, 0.3]
}
grid = GridSearchCV(estimator=model, param_grid=param_grid, n_jobs=-1,
                    cv=KFold(n_splits=5, shuffle=True), verbose=2)
# Run the grid search on the ω=7 / τ=14 training split and report the winner.
grid_result = grid.fit(X_train_vola14, y_train_vola14)
best = grid_result.best_params_
print(f"Mejor función de activación: {best['activation']}")
print(f"Mejor número de epocas: {best['epochs']}")
print(f"Mejor Longitud de paso μ (Learning Rate): {best['learning_rate']}")
print(f"Mejor Puntuación: {grid_result.best_score_}")
Fitting 5 folds for each of 1 candidates, totalling 5 fits
[CV] END ....activation=tanh, epochs=50, learning_rate=0.001; total time= 8.4s
[CV] END ....activation=tanh, epochs=50, learning_rate=0.001; total time= 8.8s
[CV] END ....activation=tanh, epochs=50, learning_rate=0.001; total time= 8.1s
[CV] END ....activation=tanh, epochs=50, learning_rate=0.001; total time= 8.1s
[CV] END ....activation=tanh, epochs=50, learning_rate=0.001; total time= 8.2s
Mejor función de activación: tanh
Mejor número de epocas: 50
Mejor Longitud de paso μ (Learning Rate): 0.001
Mejor Puntuación: -0.007385299261659384
A continuación, configuramos la red MLP empleando la API funcional de Keras. Con este método, una capa puede ser designada como la entrada para la siguiente capa en el momento de su definición.
En este contexto, Input es una función utilizada para establecer una capa de entrada en un modelo de red neuronal. La propiedad shape=(14,) define la estructura de los datos de entrada, lo que indica que estos tendrán 14 dimensiones. Por otro lado, dtype='float32' especifica que los elementos de esta capa serán números de punto flotante de 32 bits
A continuación se define la función build_models para la generación de modelos de acuerdo con los parámetros indexados a través de la lista.
Indexación de parámetros de la función build_models_mlp con base en lo indicado en el numeral 3 del parcial práctico.
# Hyper-parameter grid for build_models_mlp (exercise item 3):
# 14-dim input, candidate hidden sizes, candidate dropout rates, tanh activation.
input_shape14 = 14
neurons_list = [10, 100, 1000, 10000]
dropout_rates = [0.2, 0.4, 0.6, 0.8]
models_MLP14_vola7 = build_models_mlp(input_shape14, neurons_list, dropout_rates, 'tanh')
Modelo con 10 neuronas y dropout 0.2:
Model: "model_290"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_291 (InputLayer) [(None, 14)] 0
dense_752 (Dense) (None, 32) 480
dense_753 (Dense) (None, 16) 528
dense_754 (Dense) (None, 16) 272
dropout_290 (Dropout) (None, 16) 0
dense_755 (Dense) (None, 1) 17
=================================================================
Total params: 1,297
Trainable params: 1,297
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10 neuronas y dropout 0.4:
Model: "model_291"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_292 (InputLayer) [(None, 14)] 0
dense_756 (Dense) (None, 32) 480
dense_757 (Dense) (None, 16) 528
dense_758 (Dense) (None, 16) 272
dropout_291 (Dropout) (None, 16) 0
dense_759 (Dense) (None, 1) 17
=================================================================
Total params: 1,297
Trainable params: 1,297
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10 neuronas y dropout 0.6:
Model: "model_292"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_293 (InputLayer) [(None, 14)] 0
dense_760 (Dense) (None, 32) 480
dense_761 (Dense) (None, 16) 528
dense_762 (Dense) (None, 16) 272
dropout_292 (Dropout) (None, 16) 0
dense_763 (Dense) (None, 1) 17
=================================================================
Total params: 1,297
Trainable params: 1,297
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10 neuronas y dropout 0.8:
Model: "model_293"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_294 (InputLayer) [(None, 14)] 0
dense_764 (Dense) (None, 32) 480
dense_765 (Dense) (None, 16) 528
dense_766 (Dense) (None, 16) 272
dropout_293 (Dropout) (None, 16) 0
dense_767 (Dense) (None, 1) 17
=================================================================
Total params: 1,297
Trainable params: 1,297
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 100 neuronas y dropout 0.2:
Model: "model_294"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_295 (InputLayer) [(None, 14)] 0
dense_768 (Dense) (None, 32) 480
dense_769 (Dense) (None, 16) 528
dense_770 (Dense) (None, 16) 272
dropout_294 (Dropout) (None, 16) 0
dense_771 (Dense) (None, 1) 17
=================================================================
Total params: 1,297
Trainable params: 1,297
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 100 neuronas y dropout 0.4:
Model: "model_295"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_296 (InputLayer) [(None, 14)] 0
dense_772 (Dense) (None, 32) 480
dense_773 (Dense) (None, 16) 528
dense_774 (Dense) (None, 16) 272
dropout_295 (Dropout) (None, 16) 0
dense_775 (Dense) (None, 1) 17
=================================================================
Total params: 1,297
Trainable params: 1,297
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 100 neuronas y dropout 0.6:
Model: "model_296"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_297 (InputLayer) [(None, 14)] 0
dense_776 (Dense) (None, 32) 480
dense_777 (Dense) (None, 16) 528
dense_778 (Dense) (None, 16) 272
dropout_296 (Dropout) (None, 16) 0
dense_779 (Dense) (None, 1) 17
=================================================================
Total params: 1,297
Trainable params: 1,297
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 100 neuronas y dropout 0.8:
Model: "model_297"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_298 (InputLayer) [(None, 14)] 0
dense_780 (Dense) (None, 32) 480
dense_781 (Dense) (None, 16) 528
dense_782 (Dense) (None, 16) 272
dropout_297 (Dropout) (None, 16) 0
dense_783 (Dense) (None, 1) 17
=================================================================
Total params: 1,297
Trainable params: 1,297
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 1000 neuronas y dropout 0.2:
Model: "model_298"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_299 (InputLayer) [(None, 14)] 0
dense_784 (Dense) (None, 32) 480
dense_785 (Dense) (None, 16) 528
dense_786 (Dense) (None, 16) 272
dropout_298 (Dropout) (None, 16) 0
dense_787 (Dense) (None, 1) 17
=================================================================
Total params: 1,297
Trainable params: 1,297
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 1000 neuronas y dropout 0.4:
Model: "model_299"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_300 (InputLayer) [(None, 14)] 0
dense_788 (Dense) (None, 32) 480
dense_789 (Dense) (None, 16) 528
dense_790 (Dense) (None, 16) 272
dropout_299 (Dropout) (None, 16) 0
dense_791 (Dense) (None, 1) 17
=================================================================
Total params: 1,297
Trainable params: 1,297
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 1000 neuronas y dropout 0.6:
Model: "model_300"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_301 (InputLayer) [(None, 14)] 0
dense_792 (Dense) (None, 32) 480
dense_793 (Dense) (None, 16) 528
dense_794 (Dense) (None, 16) 272
dropout_300 (Dropout) (None, 16) 0
dense_795 (Dense) (None, 1) 17
=================================================================
Total params: 1,297
Trainable params: 1,297
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 1000 neuronas y dropout 0.8:
Model: "model_301"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_302 (InputLayer) [(None, 14)] 0
dense_796 (Dense) (None, 32) 480
dense_797 (Dense) (None, 16) 528
dense_798 (Dense) (None, 16) 272
dropout_301 (Dropout) (None, 16) 0
dense_799 (Dense) (None, 1) 17
=================================================================
Total params: 1,297
Trainable params: 1,297
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10000 neuronas y dropout 0.2:
Model: "model_302"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_303 (InputLayer) [(None, 14)] 0
dense_800 (Dense) (None, 32) 480
dense_801 (Dense) (None, 16) 528
dense_802 (Dense) (None, 16) 272
dropout_302 (Dropout) (None, 16) 0
dense_803 (Dense) (None, 1) 17
=================================================================
Total params: 1,297
Trainable params: 1,297
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10000 neuronas y dropout 0.4:
Model: "model_303"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_304 (InputLayer) [(None, 14)] 0
dense_804 (Dense) (None, 32) 480
dense_805 (Dense) (None, 16) 528
dense_806 (Dense) (None, 16) 272
dropout_303 (Dropout) (None, 16) 0
dense_807 (Dense) (None, 1) 17
=================================================================
Total params: 1,297
Trainable params: 1,297
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10000 neuronas y dropout 0.6:
Model: "model_304"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_305 (InputLayer) [(None, 14)] 0
dense_808 (Dense) (None, 32) 480
dense_809 (Dense) (None, 16) 528
dense_810 (Dense) (None, 16) 272
dropout_304 (Dropout) (None, 16) 0
dense_811 (Dense) (None, 1) 17
=================================================================
Total params: 1,297
Trainable params: 1,297
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10000 neuronas y dropout 0.8:
Model: "model_305"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_306 (InputLayer) [(None, 14)] 0
dense_812 (Dense) (None, 32) 480
dense_813 (Dense) (None, 16) 528
dense_814 (Dense) (None, 16) 272
dropout_305 (Dropout) (None, 16) 0
dense_815 (Dense) (None, 1) 17
=================================================================
Total params: 1,297
Trainable params: 1,297
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Para entrenar el modelo, se utiliza la función fit() en el objeto del modelo, proporcionándole como argumentos X_train y y_train. El proceso de entrenamiento se lleva a cabo a lo largo de un número específico de épocas. Además, el parámetro batch_size especifica cuántas muestras del conjunto de entrenamiento se usarán en cada iteración de retropropagación (backpropagation).
El conjunto de datos de validación se utiliza para evaluar el rendimiento del modelo al finalizar cada época. Se emplea un objeto ModelCheckpoint que monitorea la función de pérdida en el conjunto de validación y almacena el modelo correspondiente a la época en la que esta función alcanza su valor más bajo.
Se define val_loss como el valor de la función de coste para los datos de validación cruzada y loss como el valor de la función de coste para los datos de entrenamiento. period=1 indica que el modelo se evaluará y potencialmente se guardará después de cada época.
from tensorflow.keras.callbacks import ModelCheckpoint

# Checkpoint file template: the epoch number and the validation loss are baked
# into the file name so the best epoch can later be identified from disk.
save_weights = os.path.join(
    'keras_models',
    'PRSA_data_vola7_MLP14_weights.{epoch:02d}-{val_loss:.4f}.keras',
)

# Persist only the model with the lowest val_loss, evaluated once per epoch.
save_best14_vola7 = ModelCheckpoint(
    save_weights,
    monitor='val_loss',
    verbose=2,
    save_best_only=True,
    save_weights_only=False,
    mode='min',
    save_freq='epoch',
)
A continuación se itera sobre cada uno de los modelos del objeto models_MLP14.
import os
from joblib import dump, load

# Training histories (loss / val_loss per epoch) for each MLP configuration.
history_vola7_MPL14 = []

# Iterate over every model in the list; cached histories are reloaded so a
# notebook re-run does not retrain models that already finished.
for i, model in enumerate(models_MLP14_vola7):
    filename = f'history_vola7_MPL14_model_{i}.joblib'
    if os.path.exists(filename):
        # Cached run: load the stored history dict instead of refitting.
        model_history = load(filename)
        # BUGFIX: the message previously printed a literal placeholder instead
        # of the actual file name.
        print(f"El archivo '{filename}' ya existe. Se ha cargado el historial del entrenamiento.")
    else:
        # Fresh run: the ModelCheckpoint callback keeps the weights of the
        # epoch with the lowest validation loss.
        model_history = model.fit(x=X_train_vola14, y=y_train_vola14, batch_size=16, epochs=50,
                                  verbose=2, callbacks=[save_best14_vola7],
                                  validation_data=(X_val_vola14, y_val_vola14),
                                  shuffle=True)
        dump(model_history.history, filename)
        print(f"El entrenamiento del modelo {i + 1} se ha completado y el historial ha sido guardado en '{filename}'.")
    # Normalize to a plain dict: load() returns a dict, while fit() returns a
    # History object whose .history attribute holds the dict.
    history_vola7_MPL14.append(model_history if isinstance(model_history, dict) else model_history.history)
El archivo 'history_vola7_MPL14_model_0.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola7_MPL14_model_1.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola7_MPL14_model_2.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola7_MPL14_model_3.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola7_MPL14_model_4.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola7_MPL14_model_5.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola7_MPL14_model_6.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola7_MPL14_model_7.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola7_MPL14_model_8.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola7_MPL14_model_9.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola7_MPL14_model_10.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola7_MPL14_model_11.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola7_MPL14_model_12.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola7_MPL14_model_13.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola7_MPL14_model_14.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola7_MPL14_model_15.joblib' ya existe. Se ha cargado el historial del entrenamiento.
En este caso, el parámetro verbose=2 indica que se mostrará una línea por cada época durante el entrenamiento. Los valores posibles son: 0 para no mostrar información, 1 para visualizar una barra de progreso y 2 para ver un registro por época.
Además, las muestras se mezclan de manera aleatoria antes de cada época, gracias a la opción shuffle=True.
Una vez completado el entrenamiento, se realizan predicciones sobre el Precio del Bitcoin utilizando los mejores modelos guardados. Las predicciones generadas, que corresponden al Precio del Bitcoin escalado, son transformadas inversamente para recuperar los valores originales. Asimismo, se evalúa la calidad del ajuste mediante el uso de métricas de medición como SSE, MAPE, MAD, MSD y R2.
import os
import re
from tensorflow.keras.models import load_model
import numpy as np

# Directory where ModelCheckpoint stored one file per improving epoch.
model_dir = 'keras_models'
files = os.listdir(model_dir)

# Checkpoint names encode the epoch and the validation loss, e.g.
# "PRSA_data_vola7_MLP14_weights.05-0.0018.keras":
# group(1) = epoch, group(2) = val_loss.
pattern = r"PRSA_data_vola7_MLP14_weights\.(\d+)-([\d\.]+)\.keras"

best_val_loss = float('inf')
best_model_file = None
best_model14_vola7 = None

# Scan all checkpoints and keep the one with the lowest validation loss.
# (The epoch captured by group(1) is not needed for the selection, so it is
# not extracted — the previous version bound it to an unused variable.)
for file in files:
    match = re.match(pattern, file)
    if match:
        val_loss = float(match.group(2))  # val_loss encoded in the file name
        if val_loss < best_val_loss:
            best_val_loss = val_loss
            best_model_file = file  # remember the best checkpoint so far

# Load the winning checkpoint, if any file matched the pattern.
if best_model_file:
    best_model_path = os.path.join(model_dir, best_model_file)
    print(f"Cargando el mejor modelo: {best_model_file} con val_loss: {best_val_loss}")
    best_model14_vola7 = load_model(best_model_path)  # load the Keras model
    if best_model14_vola7 is None:
        print("Error: No se pudo cargar el mejor modelo.")
else:
    print("No se encontraron archivos de modelos que coincidan con el patrón.")
Cargando el mejor modelo: PRSA_data_vola7_MLP14_weights.05-0.0018.keras con val_loss: 0.0018
A continuación estimamos las predicciones del modelo MLP para la época en donde la función de pérdida alcanza su valor más bajo.
# Predictions with the best checkpointed model; bail out early if none loaded.
if best_model14_vola7 is None:
    print("No se puede realizar predicciones porque el mejor modelo no se ha cargado.")
else:
    # Predict on the three splits and drop the trailing singleton axis so the
    # results are flat 1-D vectors.
    train_preds_vola7_MLP14 = np.squeeze(best_model14_vola7.predict(X_train_vola14))
    val_preds_vola7_MLP14 = np.squeeze(best_model14_vola7.predict(X_val_vola14))
    test_preds_vola7_MLP14 = np.squeeze(best_model14_vola7.predict(X_test_vola14))
    # Show the scaled predictions for each split.
    print("Predicciones de Entrenamiento:", train_preds_vola7_MLP14)
    print("Predicciones de validación:", val_preds_vola7_MLP14)
    print("Predicciones de prueba:", test_preds_vola7_MLP14)
155/155 [==============================] - 0s 239us/step
1/1 [==============================] - 0s 13ms/step
1/1 [==============================] - 0s 8ms/step
Predicciones de Entrenamiento: [0.00164254 0.00164254 0.00164254 ... 0.01641651 0.01922221 0.01797052]
Predicciones de validación: [0.01557103 0.01413742 0.01610323 0.01725768 0.01788708 0.0173529
0.01973658 0.01954738 0.01691689 0.01641493 0.00893137 0.00980659
0.00912014 0.0113398 ]
Predicciones de prueba: [0.01107316 0.02227576 0.02853836 0.03826828 0.04053598 0.03758059
0.03970905 0.03625864 0.03945739 0.04863076 0.04210615 0.04040461
0.04034238 0.04068169]
A continuación se indexan los parámetros sobre la función data_plot() para un horizonte de 14 días (\(\tau = 14\)).
# Split the 7-day volatility series into train/validation/test segments for
# plotting, using a 14-day horizon (tau = 14).
data_train_plot_vola7_14, data_val_plot_vola7_14, data_test_plot_vola7_14 = data_plot(df_1_st['Volatilidad_7'], 14)
Datos de entrenamiento:
4998 0.0000
4997 0.0000
4996 0.0000
4995 0.0000
4994 0.0000
...
32 0.0080
31 0.0088
30 0.0090
29 0.0114
28 0.0107
Name: Volatilidad_7, Length: 4971, dtype: float6414
Datos de validación:
27 0.0229
26 0.0273
25 0.0393
24 0.0411
23 0.0380
22 0.0403
21 0.0391
20 0.0432
19 0.0563
18 0.0463
17 0.0444
16 0.0444
15 0.0438
14 0.0438
Name: Volatilidad_7, dtype: float6414
Datos de prueba:
13 0.0360
12 0.0188
11 0.0171
10 0.0223
9 0.0256
8 0.0349
7 0.0402
6 0.0347
5 0.0444
4 0.0607
3 0.0613
2 0.0613
1 0.0573
0 0.0577
Name: Volatilidad_7, dtype: float6414
# Plot the last 100 training points together with the validation/test targets
# and the MLP predictions for the 14-day horizon.
plot_model(data_train_plot_vola7_14[-100:], data_val_plot_vola7_14, data_test_plot_vola7_14, val_preds_vola7_MLP14, test_preds_vola7_MLP14, "Predicciones usando Perceptrón Multicapa (MLP) para un horizonte de 14 días")
A continuación realizamos un análisis sobre los residuales y rendimiento de nuestro modelo con relación a nuestro conjunto de validación(rendimiento) y test (rendimiento y residuales).
Conjunto de datos de Entrenamiento
A continuación se realiza el análisis de los residuales para el conjunto de entrenamiento.
# Residual diagnostics on the training split: Ljung-Box (autocorrelation) and
# Jarque-Bera (normality) p-values, per the printed output below.
ljung_box_pval_MLP_train14_vola7, jarque_bera_pval_MLP_train14_vola7 = diagnostic_plots(y_train_vola14, train_preds_vola7_MLP14)
Ljung-Box LB Statistic: 979.284539
Ljung-Box p-value: 0.000000
Se rechaza H0: hay autocorrelación en los residuales.
Jarque-Bera p-value: 0.000000
Se rechaza H0: los residuales no siguen una distribución normal.
# Compute the fit metrics (SSE, MAPE, MAD, MSD, R2) on the training split.
metrica_vola7_MLP_train14 = metricas(y_train_vola14,train_preds_vola7_MLP14)
# Relabel row 0 with a descriptive name for the results table.
metrica_vola7_MLP_train14.index = metrica_vola7_MLP_train14.index.map({0: 'MLP Entrenamiento Volatilidad ω = 7 y τ = 14'})
# Append the residual-diagnostic p-values as extra columns, aligned on the
# (renamed) index.
metrica_vola7_MLP_train14['Ljung-Box p-value'] = pd.Series([ljung_box_pval_MLP_train14_vola7], index=metrica_vola7_MLP_train14.index)
metrica_vola7_MLP_train14['Jarque-Bera p-value'] = pd.Series([jarque_bera_pval_MLP_train14_vola7], index=metrica_vola7_MLP_train14.index)
metrica_vola7_MLP_train14
| SSE | MAPE | MAD | MSD | R2 | Ljung-Box p-value | Jarque-Bera p-value | |
|---|---|---|---|---|---|---|---|
| MLP Entrenamiento Volatilidad ω = 7 y τ = 14 | 3.7256 | 17.22% | 0.01 | 0.0 | 82.18% | 5.7162e-215 | 0.0 |
Conjunto de datos de Prueba:
# Residual diagnostics on the test split (Ljung-Box and Jarque-Bera p-values).
ljung_box_pvalMLP14_vola7, jarque_bera_pvalMLP14_vola7 = evaluate_residuals(data_test_plot_vola7_14, test_preds_vola7_MLP14)
Se rechaza H0: hay autocorrelación en los residuales.
No se rechaza H0: los residuales siguen una distribución normal.
# Compute the fit metrics on the test split.
metrica_MLP14_test_vola7 = metricas(y_test_vola14,test_preds_vola7_MLP14)
# Relabel row 0 with a descriptive name for the results table.
metrica_MLP14_test_vola7.index = metrica_MLP14_test_vola7.index.map({0: 'MLP Prueba Volatilidad ω = 7 τ = 14'})
# Append the residual-diagnostic p-values as extra columns.
metrica_MLP14_test_vola7['Ljung-Box p-value'] = pd.Series([ljung_box_pvalMLP14_vola7], index=metrica_MLP14_test_vola7.index)
metrica_MLP14_test_vola7['Jarque-Bera p-value'] = pd.Series([jarque_bera_pvalMLP14_vola7], index=metrica_MLP14_test_vola7.index)
metrica_MLP14_test_vola7
| SSE | MAPE | MAD | MSD | R2 | Ljung-Box p-value | Jarque-Bera p-value | |
|---|---|---|---|---|---|---|---|
| MLP Prueba Volatilidad ω = 7 τ = 14 | 0.0007 | 14.26% | 0.01 | 0.0 | 18.43% | 0.0075 | 0.5912 |
Curva Runs vs Error/Score :
# Runs-vs-error curve built from the stored training histories
# (presumably plots val_loss per epoch — confirm against the helper's definition).
plot_best_model_validation_loss(history_vola7_MPL14)
Boxplot Errores Conjunto de Entrenamiento, Validación y Prueba :
# Boxplots of the residuals for the train / validation / test splits.
errores_plots(y_train_vola14, train_preds_vola7_MLP14, y_val_vola14, val_preds_vola7_MLP14, y_test_vola14, test_preds_vola7_MLP14)
De acuerdo a los resultados obtenidos del gráfico de Run vs Error/Score y al compararlo con el gráfico boxplot de los residuales se observa lo siguiente:
Conjunto de Entrenamiento: La media de los residuales se encuentra muy cercana al error/score correspondiente al val_loss de la época del mejor modelo.
Conjunto de Validación: La media de los residuales se encuentra cercana al error/score correspondiente al val_loss de la época del mejor modelo.
Conjunto de Prueba: La media de los residuales se encuentra cercana al error/score correspondiente al val_loss de la época del mejor modelo.
Lo anterior es coherente con las métricas obtenidas para el modelo implementado. Sin embargo, a pesar de esto, se evidencia que existe autocorrelación en los residuales, lo que hace que nuestro modelo no sea confiable.
Horizonte de 21 días (\(\tau=21\))#
A continuación, se lleva a cabo la búsqueda de los hiperparámetros que optimizan nuestro modelo utilizando Keras y GridSearch.
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'  # Silence TensorFlow info/warning logs.

def create_mlp_model(activation='tanh', learning_rate=0.001):
    """Build and compile the 21-input MLP used by the grid search.

    The architecture is fixed (32-16-16 hidden units, 0.2 dropout, linear
    output); only the activation and the learning rate are searched over.
    """
    inputs = Input(shape=(21,), dtype='float32')          # input layer
    hidden = Dense(32, activation=activation)(inputs)
    hidden = Dense(16, activation=activation)(hidden)
    hidden = Dense(16, activation=activation)(hidden)
    regularized = Dropout(0.2)(hidden)                    # dropout regularization
    outputs = Dense(1, activation='linear')(regularized)  # single regression output
    mlp = Model(inputs=inputs, outputs=outputs)
    mlp.compile(loss='mean_absolute_error',
                optimizer=tf.keras.optimizers.legacy.Adam(learning_rate=learning_rate))
    return mlp

# Grid-search configuration.
model = KerasRegressor(build_fn=create_mlp_model, epochs=20, batch_size=16, verbose=0)
param_grid = {
    'activation': ['tanh'],    # candidate activations, e.g. ['relu', 'tanh', 'sigmoid']
    'epochs': [100],           # candidate epoch counts, e.g. [20, 50, 100, 200, 300]
    'learning_rate': [0.001],  # candidate learning rates, e.g. [0.001, 0.01, 0.1, 0.2, 0.3]
}
grid = GridSearchCV(estimator=model, param_grid=param_grid, n_jobs=-1,
                    cv=KFold(n_splits=5, shuffle=True), verbose=2)

# Fit the grid search and report the winning hyperparameters.
grid_result = grid.fit(X_train_vola21, y_train_vola21)
print(f"Mejor función de activación: {grid_result.best_params_['activation']}")
print(f"Mejor número de epocas: {grid_result.best_params_['epochs']}")
print(f"Mejor Longitud de paso μ (Learning Rate): {grid_result.best_params_['learning_rate']}")
print(f"Mejor Puntuación: {grid_result.best_score_}")
Fitting 5 folds for each of 1 candidates, totalling 5 fits
[CV] END ...activation=tanh, epochs=100, learning_rate=0.001; total time= 17.3s
[CV] END ...activation=tanh, epochs=100, learning_rate=0.001; total time= 17.3s
[CV] END ...activation=tanh, epochs=100, learning_rate=0.001; total time= 17.3s
[CV] END ...activation=tanh, epochs=100, learning_rate=0.001; total time= 17.4s
[CV] END ...activation=tanh, epochs=100, learning_rate=0.001; total time= 21.0s
Mejor función de activación: tanh
Mejor número de epocas: 100
Mejor Longitud de paso μ (Learning Rate): 0.001
Mejor Puntuación: -0.006836974434554577
A continuación, configuramos la red MLP empleando la API funcional de Keras. Con este método, una capa puede ser designada como la entrada para la siguiente capa en el momento de su definición.
En este contexto, Input es una función utilizada para establecer una capa de entrada en un modelo de red neuronal. La propiedad shape=(21,) define la estructura de los datos de entrada, lo que indica que estos tendrán 21 dimensiones. Por otro lado, dtype='float32' especifica que los elementos de esta capa serán números de punto flotante de 32 bits.
A continuación se define la función build_models para la generación de modelos de acuerdo con los parámetros indexados a través de la lista.
Indexación de parámetros de la función build_models_mlp con base en lo indicado en el numeral 3 del parcial práctico.
# Hyperparameter grid for the 21-day-horizon MLPs: input width, candidate
# neuron counts and dropout rates (per item 3 of the exercise statement).
input_shape21 = 21
neurons_list = [10, 100, 1000, 10000]
dropout_rates = [0.2, 0.4, 0.6, 0.8]
# Build one model per (neurons, dropout) combination with tanh activations.
models_MLP21_vola7 = build_models_mlp(input_shape21, neurons_list, dropout_rates, 'tanh')
Modelo con 10 neuronas y dropout 0.2:
Model: "model_307"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_308 (InputLayer) [(None, 21)] 0
dense_820 (Dense) (None, 32) 704
dense_821 (Dense) (None, 16) 528
dense_822 (Dense) (None, 16) 272
dropout_307 (Dropout) (None, 16) 0
dense_823 (Dense) (None, 1) 17
=================================================================
Total params: 1,521
Trainable params: 1,521
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10 neuronas y dropout 0.4:
Model: "model_308"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_309 (InputLayer) [(None, 21)] 0
dense_824 (Dense) (None, 32) 704
dense_825 (Dense) (None, 16) 528
dense_826 (Dense) (None, 16) 272
dropout_308 (Dropout) (None, 16) 0
dense_827 (Dense) (None, 1) 17
=================================================================
Total params: 1,521
Trainable params: 1,521
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10 neuronas y dropout 0.6:
Model: "model_309"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_310 (InputLayer) [(None, 21)] 0
dense_828 (Dense) (None, 32) 704
dense_829 (Dense) (None, 16) 528
dense_830 (Dense) (None, 16) 272
dropout_309 (Dropout) (None, 16) 0
dense_831 (Dense) (None, 1) 17
=================================================================
Total params: 1,521
Trainable params: 1,521
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10 neuronas y dropout 0.8:
Model: "model_310"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_311 (InputLayer) [(None, 21)] 0
dense_832 (Dense) (None, 32) 704
dense_833 (Dense) (None, 16) 528
dense_834 (Dense) (None, 16) 272
dropout_310 (Dropout) (None, 16) 0
dense_835 (Dense) (None, 1) 17
=================================================================
Total params: 1,521
Trainable params: 1,521
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 100 neuronas y dropout 0.2:
Model: "model_311"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_312 (InputLayer) [(None, 21)] 0
dense_836 (Dense) (None, 32) 704
dense_837 (Dense) (None, 16) 528
dense_838 (Dense) (None, 16) 272
dropout_311 (Dropout) (None, 16) 0
dense_839 (Dense) (None, 1) 17
=================================================================
Total params: 1,521
Trainable params: 1,521
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 100 neuronas y dropout 0.4:
Model: "model_312"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_313 (InputLayer) [(None, 21)] 0
dense_840 (Dense) (None, 32) 704
dense_841 (Dense) (None, 16) 528
dense_842 (Dense) (None, 16) 272
dropout_312 (Dropout) (None, 16) 0
dense_843 (Dense) (None, 1) 17
=================================================================
Total params: 1,521
Trainable params: 1,521
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 100 neuronas y dropout 0.6:
Model: "model_313"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_314 (InputLayer) [(None, 21)] 0
dense_844 (Dense) (None, 32) 704
dense_845 (Dense) (None, 16) 528
dense_846 (Dense) (None, 16) 272
dropout_313 (Dropout) (None, 16) 0
dense_847 (Dense) (None, 1) 17
=================================================================
Total params: 1,521
Trainable params: 1,521
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 100 neuronas y dropout 0.8:
Model: "model_314"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_315 (InputLayer) [(None, 21)] 0
dense_848 (Dense) (None, 32) 704
dense_849 (Dense) (None, 16) 528
dense_850 (Dense) (None, 16) 272
dropout_314 (Dropout) (None, 16) 0
dense_851 (Dense) (None, 1) 17
=================================================================
Total params: 1,521
Trainable params: 1,521
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 1000 neuronas y dropout 0.2:
Model: "model_315"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_316 (InputLayer) [(None, 21)] 0
dense_852 (Dense) (None, 32) 704
dense_853 (Dense) (None, 16) 528
dense_854 (Dense) (None, 16) 272
dropout_315 (Dropout) (None, 16) 0
dense_855 (Dense) (None, 1) 17
=================================================================
Total params: 1,521
Trainable params: 1,521
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 1000 neuronas y dropout 0.4:
Model: "model_316"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_317 (InputLayer) [(None, 21)] 0
dense_856 (Dense) (None, 32) 704
dense_857 (Dense) (None, 16) 528
dense_858 (Dense) (None, 16) 272
dropout_316 (Dropout) (None, 16) 0
dense_859 (Dense) (None, 1) 17
=================================================================
Total params: 1,521
Trainable params: 1,521
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 1000 neuronas y dropout 0.6:
Model: "model_317"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_318 (InputLayer) [(None, 21)] 0
dense_860 (Dense) (None, 32) 704
dense_861 (Dense) (None, 16) 528
dense_862 (Dense) (None, 16) 272
dropout_317 (Dropout) (None, 16) 0
dense_863 (Dense) (None, 1) 17
=================================================================
Total params: 1,521
Trainable params: 1,521
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 1000 neuronas y dropout 0.8:
Model: "model_318"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_319 (InputLayer) [(None, 21)] 0
dense_864 (Dense) (None, 32) 704
dense_865 (Dense) (None, 16) 528
dense_866 (Dense) (None, 16) 272
dropout_318 (Dropout) (None, 16) 0
dense_867 (Dense) (None, 1) 17
=================================================================
Total params: 1,521
Trainable params: 1,521
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10000 neuronas y dropout 0.2:
Model: "model_319"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_320 (InputLayer) [(None, 21)] 0
dense_868 (Dense) (None, 32) 704
dense_869 (Dense) (None, 16) 528
dense_870 (Dense) (None, 16) 272
dropout_319 (Dropout) (None, 16) 0
dense_871 (Dense) (None, 1) 17
=================================================================
Total params: 1,521
Trainable params: 1,521
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10000 neuronas y dropout 0.4:
Model: "model_320"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_321 (InputLayer) [(None, 21)] 0
dense_872 (Dense) (None, 32) 704
dense_873 (Dense) (None, 16) 528
dense_874 (Dense) (None, 16) 272
dropout_320 (Dropout) (None, 16) 0
dense_875 (Dense) (None, 1) 17
=================================================================
Total params: 1,521
Trainable params: 1,521
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10000 neuronas y dropout 0.6:
Model: "model_321"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_322 (InputLayer) [(None, 21)] 0
dense_876 (Dense) (None, 32) 704
dense_877 (Dense) (None, 16) 528
dense_878 (Dense) (None, 16) 272
dropout_321 (Dropout) (None, 16) 0
dense_879 (Dense) (None, 1) 17
=================================================================
Total params: 1,521
Trainable params: 1,521
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10000 neuronas y dropout 0.8:
Model: "model_322"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_323 (InputLayer) [(None, 21)] 0
dense_880 (Dense) (None, 32) 704
dense_881 (Dense) (None, 16) 528
dense_882 (Dense) (None, 16) 272
dropout_322 (Dropout) (None, 16) 0
dense_883 (Dense) (None, 1) 17
=================================================================
Total params: 1,521
Trainable params: 1,521
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Para entrenar el modelo, se utiliza la función fit() en el objeto del modelo, proporcionándole como argumentos X_train y y_train. El proceso de entrenamiento se lleva a cabo a lo largo de un número específico de épocas. Además, el parámetro batch_size especifica cuántas muestras del conjunto de entrenamiento se usarán en cada iteración de retropropagación (backpropagation).
El conjunto de datos de validación se utiliza para evaluar el rendimiento del modelo al finalizar cada época. Se emplea un objeto ModelCheckpoint que monitorea la función de pérdida en el conjunto de validación y almacena el modelo correspondiente a la época en la que esta función alcanza su valor más bajo.
Se define val_loss como el valor de la función de coste para los datos de validación cruzada y loss como el valor de la función de coste para los datos de entrenamiento. period=1 indica que el modelo se evaluará y potencialmente se guardará después de cada época.
from tensorflow.keras.callbacks import ModelCheckpoint

# Checkpoint path template: Keras fills in the epoch number and validation loss.
save_weights = os.path.join(
    'keras_models',
    'PRSA_data_vola7_MLP21_weights.{epoch:02d}-{val_loss:.4f}.keras',
)

# Persist only the model with the lowest validation loss seen so far,
# evaluated once per epoch.
save_best21_vola7 = ModelCheckpoint(
    save_weights,
    monitor='val_loss',
    mode='min',
    save_best_only=True,
    save_weights_only=False,
    save_freq='epoch',
    verbose=2,
)
A continuación se itera sobre cada uno de los modelos del objeto models_MLP21.
import os
from joblib import dump, load

# Train each MLP21 variant once; cached training histories are reloaded from
# disk so re-running the notebook does not repeat the (slow) fits.
history_vola7_MPL21 = []

# Iterate over every model built for the 21-day horizon.
for i, model in enumerate(models_MLP21_vola7):
    filename = f'history_vola7_MPL21_model_{i}.joblib'
    if os.path.exists(filename):
        # A saved history exists: load it instead of retraining.
        model_history = load(filename)
        # Fixed: the f-string previously printed the literal '(unknown)'
        # instead of interpolating the filename.
        print(f"El archivo '{filename}' ya existe. Se ha cargado el historial del entrenamiento.")
    else:
        # Train the model; the ModelCheckpoint callback persists the best epoch.
        model_history = model.fit(x=X_train_vola21, y=y_train_vola21, batch_size=16, epochs=100,
                                  verbose=2, callbacks=[save_best21_vola7], validation_data=(X_val_vola21, y_val_vola21),
                                  shuffle=True)
        dump(model_history.history, filename)
        print(f"El entrenamiento del modelo {i + 1} se ha completado y el historial ha sido guardado en '{filename}'.")
    # Normalize: loaded histories are plain dicts, fresh fits return a History object.
    history_vola7_MPL21.append(model_history if isinstance(model_history, dict) else model_history.history)
El archivo 'history_vola7_MPL21_model_0.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola7_MPL21_model_1.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola7_MPL21_model_2.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola7_MPL21_model_3.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola7_MPL21_model_4.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola7_MPL21_model_5.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola7_MPL21_model_6.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola7_MPL21_model_7.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola7_MPL21_model_8.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola7_MPL21_model_9.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola7_MPL21_model_10.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola7_MPL21_model_11.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola7_MPL21_model_12.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola7_MPL21_model_13.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola7_MPL21_model_14.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola7_MPL21_model_15.joblib' ya existe. Se ha cargado el historial del entrenamiento.
En este caso, el parámetro verbose=2 indica que se mostrará una línea por cada época durante el entrenamiento. Los valores posibles son: 0 para no mostrar información, 1 para visualizar una barra de progreso y 2 para ver un registro por época.
Además, las muestras se mezclan de manera aleatoria antes de cada época, gracias a la opción shuffle=True.
Una vez completado el entrenamiento, se realizan predicciones sobre el Precio del Bitcoin utilizando los mejores modelos guardados. Asimismo, se evalúa la calidad del ajuste mediante el uso de métricas de medición como SSE, MAPE, MAD, MSD y R2.
import os
import re
from tensorflow.keras.models import load_model
import numpy as np

# Scan the checkpoint directory for saved MLP21 models and keep the one whose
# filename encodes the lowest validation loss.
model_dir = 'keras_models'
files = os.listdir(model_dir)
pattern = r"PRSA_data_vola7_MLP21_weights\.(\d+)-([\d\.]+)\.keras"
best_val_loss = float('inf')
best_model_file = None
best_model21_vola7 = None

# Iterate over the files in the directory.
for file in files:
    match = re.match(pattern, file)
    if match:
        # group(1) is the epoch, group(2) the val_loss; only the loss is
        # needed to rank checkpoints (the unused epoch extraction was removed).
        val_loss = float(match.group(2))
        if val_loss < best_val_loss:
            best_val_loss = val_loss
            best_model_file = file  # Remember the best checkpoint so far.

# Load the best model if one was found.
if best_model_file:
    best_model_path = os.path.join(model_dir, best_model_file)
    print(f"Cargando el mejor modelo: {best_model_file} con val_loss: {best_val_loss}")
    best_model21_vola7 = load_model(best_model_path)
    if best_model21_vola7 is None:
        print("Error: No se pudo cargar el mejor modelo.")
else:
    print("No se encontraron archivos de modelos que coincidan con el patrón.")
Cargando el mejor modelo: PRSA_data_vola7_MLP21_weights.94-0.0024.keras con val_loss: 0.0024
A continuación estimamos las predicciones del modelo MLP para la época en donde la función de pérdida alcanza su valor más bajo.
# Predict with the best checkpointed model on the train/validation/test splits.
if best_model21_vola7 is not None:
    train_preds_vola7_MLP21 = best_model21_vola7.predict(X_train_vola21)
    val_preds_vola7_MLP21 = best_model21_vola7.predict(X_val_vola21)
    test_preds_vola7_MLP21 = best_model21_vola7.predict(X_test_vola21)
    # Flatten the predictions if necessary (np.squeeze drops singleton axes).
    train_preds_vola7_MLP21 = np.squeeze(train_preds_vola7_MLP21)
    val_preds_vola7_MLP21 = np.squeeze(val_preds_vola7_MLP21)
    test_preds_vola7_MLP21 = np.squeeze(test_preds_vola7_MLP21)
    # Print the predictions (training label fixed to match the others: it was
    # missing the colon separator).
    print("Predicciones de entrenamiento:", train_preds_vola7_MLP21)
    print("Predicciones de validación:", val_preds_vola7_MLP21)
    print("Predicciones de prueba:", test_preds_vola7_MLP21)
else:
    print("No se puede realizar predicciones porque el mejor modelo no se ha cargado.")
154/154 [==============================] - 0s 195us/step
1/1 [==============================] - 0s 10ms/step
1/1 [==============================] - 0s 9ms/step
Predicciones de entrenamiento [0.00211542 0.00211542 0.00211542 ... 0.02692614 0.01982317 0.01988152]
Predicciones de validación: [0.02118946 0.02202928 0.02317089 0.02266917 0.02384281 0.02764382
0.02818159 0.02680754 0.01963386 0.02043094 0.02249693 0.01887584
0.01419025 0.01518366 0.01802568 0.00811394 0.00916726 0.01432351
0.01656792 0.01876501 0.01725021]
Predicciones de prueba: [0.01680177 0.01328124 0.01506495 0.01746774 0.01830186 0.01813584
0.02132928 0.02057712 0.01646137 0.01675731 0.00849648 0.00976775
0.00945496 0.01249724 0.01248461 0.02341114 0.03005834 0.03801074
0.04037543 0.03617397 0.03958373]
A continuación se indexan los parámetros sobre la función data_plot() para un horizonte de 21 días (\(\tau = 21\)).
# Build the train/validation/test target series used for plotting (tau = 21 horizon).
data_train_plot_vola21, data_val_plot_vola21, data_test_plot_vola21 = data_plot(df_1_st['Volatilidad_7'], 21)
Datos de entrenamiento:
4998 0.0000
4997 0.0000
4996 0.0000
4995 0.0000
4994 0.0000
...
46 0.0126
45 0.0139
44 0.0184
43 0.0168
42 0.0131
Name: Volatilidad_7, Length: 4957, dtype: float64
Datos de validación:
41 0.0123
40 0.0155
39 0.0172
38 0.0189
37 0.0171
36 0.0194
35 0.0194
34 0.0173
33 0.0167
32 0.0080
31 0.0088
30 0.0090
29 0.0114
28 0.0107
27 0.0229
26 0.0273
25 0.0393
24 0.0411
23 0.0380
22 0.0403
21 0.0391
Name: Volatilidad_7, dtype: float64
Datos de prueba:
20 0.0432
19 0.0563
18 0.0463
17 0.0444
16 0.0444
15 0.0438
14 0.0438
13 0.0360
12 0.0188
11 0.0171
10 0.0223
9 0.0256
8 0.0349
7 0.0402
6 0.0347
5 0.0444
4 0.0607
3 0.0613
2 0.0613
1 0.0573
0 0.0577
Name: Volatilidad_7, dtype: float64
# Plot the last 100 training observations alongside the validation/test targets and the MLP predictions.
plot_model(data_train_plot_vola21[-100:], data_val_plot_vola21, data_test_plot_vola21, val_preds_vola7_MLP21, test_preds_vola7_MLP21, "Predicciones usando Perceptrón Multicapa (MLP) para un horizonte de 21 días")
A continuación realizamos un análisis sobre los residuales y rendimiento de nuestro modelo con relación a nuestro conjunto de validación(rendimiento) y test (rendimiento y residuales).
Conjunto de datos de Entrenamiento
A continuación se realiza el análisis de los residuales para el conjunto de entrenamiento.
# Residual diagnostics for the training split: Ljung-Box (autocorrelation) and Jarque-Bera (normality) p-values.
ljung_box_pval_MLP_train21_vola7, jarque_bera_pval_MLP_train21_vola7 = diagnostic_plots(y_train_vola21, train_preds_vola7_MLP21)
Ljung-Box LB Statistic: 1224.797069
Ljung-Box p-value: 0.000000
Se rechaza H0: hay autocorrelación en los residuales.
Jarque-Bera p-value: 0.000000
Se rechaza H0: los residuales no siguen una distribución normal.
# Compute the fit metrics (SSE, MAPE, MAD, MSD, R2) for the training split,
# label the row, and attach the residual-diagnostic p-values computed above.
metrica_vola7_MLP_train21 = metricas(y_train_vola21,train_preds_vola7_MLP21)
metrica_vola7_MLP_train21.index = metrica_vola7_MLP_train21.index.map({0: 'MLP Entrenamiento Volatilidad ω = 7 y τ = 21'})
metrica_vola7_MLP_train21['Ljung-Box p-value'] = pd.Series([ljung_box_pval_MLP_train21_vola7], index=metrica_vola7_MLP_train21.index)
metrica_vola7_MLP_train21['Jarque-Bera p-value'] = pd.Series([jarque_bera_pval_MLP_train21_vola7], index=metrica_vola7_MLP_train21.index)
metrica_vola7_MLP_train21
| SSE | MAPE | MAD | MSD | R2 | Ljung-Box p-value | Jarque-Bera p-value | |
|---|---|---|---|---|---|---|---|
| MLP Entrenamiento Volatilidad ω = 7 y τ = 21 | 3.8423 | 16.05% | 0.01 | 0.0 | 81.61% | 2.4903e-268 | 0.0 |
Conjunto de datos de Prueba:
# Residual diagnostics on the test split: Ljung-Box (autocorrelation) and Jarque-Bera (normality) p-values.
ljung_box_pvalMLP21_vola7, jarque_bera_pvalMLP21_vola7 = evaluate_residuals(data_test_plot_vola21, test_preds_vola7_MLP21)
Se rechaza H0: hay autocorrelación en los residuales.
No se rechaza H0: los residuales siguen una distribución normal.
# Test-split metrics table, labelled and augmented with the diagnostic p-values.
metrica_MLP21_test_vola7 = metricas(y_test_vola21,test_preds_vola7_MLP21)
metrica_MLP21_test_vola7.index = metrica_MLP21_test_vola7.index.map({0: 'MLP Prueba Volatilidad ω = 7 y τ = 21'})
metrica_MLP21_test_vola7['Ljung-Box p-value'] = pd.Series([ljung_box_pvalMLP21_vola7], index=metrica_MLP21_test_vola7.index)
metrica_MLP21_test_vola7['Jarque-Bera p-value'] = pd.Series([jarque_bera_pvalMLP21_vola7], index=metrica_MLP21_test_vola7.index)
metrica_MLP21_test_vola7
| SSE | MAPE | MAD | MSD | R2 | Ljung-Box p-value | Jarque-Bera p-value | |
|---|---|---|---|---|---|---|---|
| MLP Prueba Volatilidad ω = 7 y τ = 21 | 0.0004 | 18.01% | 0.0 | 0.0 | 85.62% | 0.0012 | 0.4394 |
Curva Runs vs Error/Score :
# Plot the validation-loss curves collected in the training histories of all 16 MLP21 runs.
plot_best_model_validation_loss(history_vola7_MPL21)
Boxplot Errores Conjunto de Entrenamiento, Validación y Prueba :
# Boxplots of prediction errors for the training, validation, and test splits.
errores_plots(y_train_vola21, train_preds_vola7_MLP21, y_val_vola21, val_preds_vola7_MLP21, y_test_vola21, test_preds_vola7_MLP21)
De acuerdo a los resultados obtenidos del gráfico de Run vs Error/Score y al compararlo con el gráfico boxplot de los residuales se observa lo siguiente:
Conjunto de Entrenamiento: La media de los residuales se encuentra cercana al error/score correspondiente al val_loss de la época del mejor modelo.
Conjunto de Validación: La media de los residuales se encuentra cercana al error/score correspondiente al val_loss de la época del mejor modelo.
Conjunto de Prueba: La media de los residuales se encuentra cercana al error/score correspondiente al val_loss de la época del mejor modelo. De igual forma se observa una alta variabilidad en los residuales.
Lo anterior es coherente con las métricas obtenidas para el modelo implementado.
Horizonte de 28 días (\(\tau=28\))#
A continuación, se lleva a cabo la búsqueda de los hiperparámetros que optimizan nuestro modelo utilizando Keras y GridSearch.
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'  # Silence TensorFlow info/warning logs.

def create_mlp_model(activation='tanh', learning_rate=0.001):
    """Build and compile the 28-input MLP used by the grid search.

    Architecture: 32-16-16 dense hidden layers with the given activation,
    a 0.2 dropout layer for regularization, and a linear single-unit output.
    Compiled with MAE loss and a legacy Adam optimizer at the given rate.
    """
    inputs = Input(shape=(28,), dtype='float32')
    hidden = Dense(32, activation=activation)(inputs)
    hidden = Dense(16, activation=activation)(hidden)
    hidden = Dense(16, activation=activation)(hidden)
    regularized = Dropout(0.2)(hidden)
    outputs = Dense(1, activation='linear')(regularized)
    mlp = Model(inputs=inputs, outputs=outputs)
    mlp.compile(loss='mean_absolute_error',
                optimizer=tf.keras.optimizers.legacy.Adam(learning_rate=learning_rate))
    return mlp
# Grid-search configuration: wrap the Keras builder in a sklearn-compatible
# regressor so GridSearchCV can cross-validate its hyperparameters.
model = KerasRegressor(build_fn=create_mlp_model, epochs=20, batch_size=16, verbose=0)
param_grid = {
'activation': ['tanh'], # activations to try, e.g. ['relu', 'tanh', 'sigmoid']
'epochs' : [50], # epoch counts to try, e.g. [20, 50, 100, 200, 300]
'learning_rate' : [0.001] # learning rates to try, e.g. [0.001, 0.01, 0.1, 0.2, 0.3]
}
grid = GridSearchCV(estimator=model, param_grid=param_grid, n_jobs=-1, cv=KFold(n_splits=5, shuffle=True), verbose =2)
# Fit the grid search on the 28-day training split.
grid_result = grid.fit(X_train_vola28, y_train_vola28)
# Report the winning hyperparameters and the best cross-validation score.
print(f"Mejor función de activación: {grid_result.best_params_['activation']}")
print(f"Mejor número de epocas: {grid_result.best_params_['epochs']}")
print(f"Mejor Longitud de paso μ (Learning Rate): {grid_result.best_params_['learning_rate']}")
print(f"Mejor Puntuación: {grid_result.best_score_}")
Fitting 5 folds for each of 1 candidates, totalling 5 fits
[CV] END ....activation=tanh, epochs=50, learning_rate=0.001; total time= 7.7s
[CV] END ....activation=tanh, epochs=50, learning_rate=0.001; total time= 7.9s
[CV] END ....activation=tanh, epochs=50, learning_rate=0.001; total time= 7.9s
[CV] END ....activation=tanh, epochs=50, learning_rate=0.001; total time= 8.1s
[CV] END ....activation=tanh, epochs=50, learning_rate=0.001; total time= 8.1s
Mejor función de activación: tanh
Mejor número de epocas: 50
Mejor Longitud de paso μ (Learning Rate): 0.001
Mejor Puntuación: -0.007541426550596952
A continuación, configuramos la red MLP empleando la API funcional de Keras. Con este método, una capa puede ser designada como la entrada para la siguiente capa en el momento de su definición.
En este contexto, Input es una función utilizada para establecer una capa de entrada en un modelo de red neuronal. La propiedad shape=(28,) define la estructura de los datos de entrada, lo que indica que estos tendrán 28 dimensiones. Por otro lado, dtype='float32' especifica que los elementos de esta capa serán números de punto flotante de 32 bits.
A continuación se define la función build_models para la generación de modelos de acuerdo con los parámetros indexados a través de la lista.
La indexación de parámetros de la función build_models_mlp se realiza con base en lo indicado en el numeral 3 del parcial práctico.
# Model grid for the 28-day horizon: 4 neuron counts x 4 dropout rates
# (16 models in total, as the printed summaries below confirm).
input_shape28 = 28
neurons_list = [10, 100, 1000, 10000]
dropout_rates = [0.2, 0.4, 0.6, 0.8]
models_MLP28_vola7 = build_models_mlp(input_shape28, neurons_list, dropout_rates, 'tanh')
Modelo con 10 neuronas y dropout 0.2:
Model: "model_324"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_325 (InputLayer) [(None, 28)] 0
dense_888 (Dense) (None, 32) 928
dense_889 (Dense) (None, 16) 528
dense_890 (Dense) (None, 16) 272
dropout_324 (Dropout) (None, 16) 0
dense_891 (Dense) (None, 1) 17
=================================================================
Total params: 1,745
Trainable params: 1,745
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10 neuronas y dropout 0.4:
Model: "model_325"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_326 (InputLayer) [(None, 28)] 0
dense_892 (Dense) (None, 32) 928
dense_893 (Dense) (None, 16) 528
dense_894 (Dense) (None, 16) 272
dropout_325 (Dropout) (None, 16) 0
dense_895 (Dense) (None, 1) 17
=================================================================
Total params: 1,745
Trainable params: 1,745
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10 neuronas y dropout 0.6:
Model: "model_326"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_327 (InputLayer) [(None, 28)] 0
dense_896 (Dense) (None, 32) 928
dense_897 (Dense) (None, 16) 528
dense_898 (Dense) (None, 16) 272
dropout_326 (Dropout) (None, 16) 0
dense_899 (Dense) (None, 1) 17
=================================================================
Total params: 1,745
Trainable params: 1,745
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10 neuronas y dropout 0.8:
Model: "model_327"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_328 (InputLayer) [(None, 28)] 0
dense_900 (Dense) (None, 32) 928
dense_901 (Dense) (None, 16) 528
dense_902 (Dense) (None, 16) 272
dropout_327 (Dropout) (None, 16) 0
dense_903 (Dense) (None, 1) 17
=================================================================
Total params: 1,745
Trainable params: 1,745
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 100 neuronas y dropout 0.2:
Model: "model_328"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_329 (InputLayer) [(None, 28)] 0
dense_904 (Dense) (None, 32) 928
dense_905 (Dense) (None, 16) 528
dense_906 (Dense) (None, 16) 272
dropout_328 (Dropout) (None, 16) 0
dense_907 (Dense) (None, 1) 17
=================================================================
Total params: 1,745
Trainable params: 1,745
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 100 neuronas y dropout 0.4:
Model: "model_329"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_330 (InputLayer) [(None, 28)] 0
dense_908 (Dense) (None, 32) 928
dense_909 (Dense) (None, 16) 528
dense_910 (Dense) (None, 16) 272
dropout_329 (Dropout) (None, 16) 0
dense_911 (Dense) (None, 1) 17
=================================================================
Total params: 1,745
Trainable params: 1,745
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 100 neuronas y dropout 0.6:
Model: "model_330"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_331 (InputLayer) [(None, 28)] 0
dense_912 (Dense) (None, 32) 928
dense_913 (Dense) (None, 16) 528
dense_914 (Dense) (None, 16) 272
dropout_330 (Dropout) (None, 16) 0
dense_915 (Dense) (None, 1) 17
=================================================================
Total params: 1,745
Trainable params: 1,745
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 100 neuronas y dropout 0.8:
Model: "model_331"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_332 (InputLayer) [(None, 28)] 0
dense_916 (Dense) (None, 32) 928
dense_917 (Dense) (None, 16) 528
dense_918 (Dense) (None, 16) 272
dropout_331 (Dropout) (None, 16) 0
dense_919 (Dense) (None, 1) 17
=================================================================
Total params: 1,745
Trainable params: 1,745
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 1000 neuronas y dropout 0.2:
Model: "model_332"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_333 (InputLayer) [(None, 28)] 0
dense_920 (Dense) (None, 32) 928
dense_921 (Dense) (None, 16) 528
dense_922 (Dense) (None, 16) 272
dropout_332 (Dropout) (None, 16) 0
dense_923 (Dense) (None, 1) 17
=================================================================
Total params: 1,745
Trainable params: 1,745
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 1000 neuronas y dropout 0.4:
Model: "model_333"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_334 (InputLayer) [(None, 28)] 0
dense_924 (Dense) (None, 32) 928
dense_925 (Dense) (None, 16) 528
dense_926 (Dense) (None, 16) 272
dropout_333 (Dropout) (None, 16) 0
dense_927 (Dense) (None, 1) 17
=================================================================
Total params: 1,745
Trainable params: 1,745
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 1000 neuronas y dropout 0.6:
Model: "model_334"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_335 (InputLayer) [(None, 28)] 0
dense_928 (Dense) (None, 32) 928
dense_929 (Dense) (None, 16) 528
dense_930 (Dense) (None, 16) 272
dropout_334 (Dropout) (None, 16) 0
dense_931 (Dense) (None, 1) 17
=================================================================
Total params: 1,745
Trainable params: 1,745
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 1000 neuronas y dropout 0.8:
Model: "model_335"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_336 (InputLayer) [(None, 28)] 0
dense_932 (Dense) (None, 32) 928
dense_933 (Dense) (None, 16) 528
dense_934 (Dense) (None, 16) 272
dropout_335 (Dropout) (None, 16) 0
dense_935 (Dense) (None, 1) 17
=================================================================
Total params: 1,745
Trainable params: 1,745
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10000 neuronas y dropout 0.2:
Model: "model_336"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_337 (InputLayer) [(None, 28)] 0
dense_936 (Dense) (None, 32) 928
dense_937 (Dense) (None, 16) 528
dense_938 (Dense) (None, 16) 272
dropout_336 (Dropout) (None, 16) 0
dense_939 (Dense) (None, 1) 17
=================================================================
Total params: 1,745
Trainable params: 1,745
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10000 neuronas y dropout 0.4:
Model: "model_337"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_338 (InputLayer) [(None, 28)] 0
dense_940 (Dense) (None, 32) 928
dense_941 (Dense) (None, 16) 528
dense_942 (Dense) (None, 16) 272
dropout_337 (Dropout) (None, 16) 0
dense_943 (Dense) (None, 1) 17
=================================================================
Total params: 1,745
Trainable params: 1,745
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10000 neuronas y dropout 0.6:
Model: "model_338"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_339 (InputLayer) [(None, 28)] 0
dense_944 (Dense) (None, 32) 928
dense_945 (Dense) (None, 16) 528
dense_946 (Dense) (None, 16) 272
dropout_338 (Dropout) (None, 16) 0
dense_947 (Dense) (None, 1) 17
=================================================================
Total params: 1,745
Trainable params: 1,745
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10000 neuronas y dropout 0.8:
Model: "model_339"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_340 (InputLayer) [(None, 28)] 0
dense_948 (Dense) (None, 32) 928
dense_949 (Dense) (None, 16) 528
dense_950 (Dense) (None, 16) 272
dropout_339 (Dropout) (None, 16) 0
dense_951 (Dense) (None, 1) 17
=================================================================
Total params: 1,745
Trainable params: 1,745
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Para entrenar el modelo, se utiliza la función fit() en el objeto del modelo, proporcionándole como argumentos X_train y y_train. El proceso de entrenamiento se lleva a cabo a lo largo de un número específico de épocas. Además, el parámetro batch_size especifica cuántas muestras del conjunto de entrenamiento se usarán en cada iteración de retropropagación (backpropagation).
El conjunto de datos de validación se utiliza para evaluar el rendimiento del modelo al finalizar cada época. Se emplea un objeto ModelCheckpoint que monitorea la función de pérdida en el conjunto de validación y almacena el modelo correspondiente a la época en la que esta función alcanza su valor más bajo.
Se define val_loss como el valor de la función de coste para los datos de validación cruzada y loss como el valor de la función de coste para los datos de entrenamiento. period=1 indica que el modelo se evaluará y potencialmente se guardará después de cada época.
from tensorflow.keras.callbacks import ModelCheckpoint

# Checkpoint path template: Keras fills in the epoch number and validation loss.
save_weights = os.path.join(
    'keras_models',
    'PRSA_data_vola7_MLP28_weights.{epoch:02d}-{val_loss:.4f}.keras',
)

# Persist only the model with the lowest validation loss seen so far,
# evaluated once per epoch.
save_best28_vola7 = ModelCheckpoint(
    save_weights,
    monitor='val_loss',
    mode='min',
    save_best_only=True,
    save_weights_only=False,
    save_freq='epoch',
    verbose=2,
)
A continuación se itera sobre cada uno de los modelos del objeto models_MLP28.
import os
from joblib import dump, load

# Train each MLP28 variant once; cached training histories are reloaded from
# disk so re-running the notebook does not repeat the (slow) fits.
history_vola7_MPL28 = []

# Iterate over every model built for the 28-day horizon.
for i, model in enumerate(models_MLP28_vola7):
    filename = f'history_vola7_MPL28_model_{i}.joblib'
    if os.path.exists(filename):
        # A saved history exists: load it instead of retraining.
        model_history = load(filename)
        # Fixed: the f-string previously printed the literal '(unknown)'
        # instead of interpolating the filename.
        print(f"El archivo '{filename}' ya existe. Se ha cargado el historial del entrenamiento.")
    else:
        # 50 epochs: the value selected by the grid search above.
        model_history = model.fit(x=X_train_vola28, y=y_train_vola28, batch_size=16, epochs=50,
                                  verbose=2, callbacks=[save_best28_vola7], validation_data=(X_val_vola28, y_val_vola28),
                                  shuffle=True)
        dump(model_history.history, filename)
        print(f"El entrenamiento del modelo {i + 1} se ha completado y el historial ha sido guardado en '{filename}'.")
    # Normalize: loaded histories are plain dicts, fresh fits return a History object.
    history_vola7_MPL28.append(model_history if isinstance(model_history, dict) else model_history.history)
El archivo 'history_vola7_MPL28_model_0.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola7_MPL28_model_1.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola7_MPL28_model_2.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola7_MPL28_model_3.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola7_MPL28_model_4.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola7_MPL28_model_5.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola7_MPL28_model_6.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola7_MPL28_model_7.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola7_MPL28_model_8.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola7_MPL28_model_9.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola7_MPL28_model_10.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola7_MPL28_model_11.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola7_MPL28_model_12.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola7_MPL28_model_13.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola7_MPL28_model_14.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola7_MPL28_model_15.joblib' ya existe. Se ha cargado el historial del entrenamiento.
En este caso, el parámetro verbose=2 indica que se mostrará una línea por cada época durante el entrenamiento. Los valores posibles son: 0 para no mostrar información, 1 para visualizar una barra de progreso y 2 para ver un registro por época.
Además, las muestras se mezclan de manera aleatoria antes de cada época, gracias a la opción shuffle=True.
Una vez completado el entrenamiento, se realizan predicciones sobre el Precio del Bitcoin utilizando los mejores modelos guardados. Asimismo, se evalúa la calidad del ajuste mediante el uso de métricas de medición como SSE, MAPE, MAD, MSD y R2.
import os
import re
from tensorflow.keras.models import load_model
import numpy as np

# Scan the checkpoint directory for saved MLP28 models and keep the one whose
# filename encodes the lowest validation loss.
model_dir = 'keras_models'
files = os.listdir(model_dir)
pattern = r"PRSA_data_vola7_MLP28_weights\.(\d+)-([\d\.]+)\.keras"
best_val_loss = float('inf')
best_model_file = None
best_model28_vola7 = None

# Iterate over the files in the directory.
for file in files:
    match = re.match(pattern, file)
    if match:
        # group(1) is the epoch, group(2) the val_loss; only the loss is
        # needed to rank checkpoints (the unused epoch extraction was removed).
        val_loss = float(match.group(2))
        if val_loss < best_val_loss:
            best_val_loss = val_loss
            best_model_file = file  # Remember the best checkpoint so far.

# Load the best model if one was found.
if best_model_file:
    best_model_path = os.path.join(model_dir, best_model_file)
    print(f"Cargando el mejor modelo: {best_model_file} con val_loss: {best_val_loss}")
    best_model28_vola7 = load_model(best_model_path)
    if best_model28_vola7 is None:
        print("Error: No se pudo cargar el mejor modelo.")
else:
    print("No se encontraron archivos de modelos que coincidan con el patrón.")
Cargando el mejor modelo: PRSA_data_vola7_MLP28_weights.32-0.0029.keras con val_loss: 0.0029
A continuación estimamos las predicciones del modelo MLP para la época en donde la función de pérdida alcanza su valor más bajo.
# Generate forecasts with the best checkpointed model for each data split.
if best_model28_vola7 is None:
    print("No se puede realizar predicciones porque el mejor modelo no se ha cargado.")
else:
    # Predict and drop singleton dimensions in one step per split.
    train_preds_vola7_MLP28 = np.squeeze(best_model28_vola7.predict(X_train_vola28))
    val_preds_vola7_MLP28 = np.squeeze(best_model28_vola7.predict(X_val_vola28))
    test_preds_vola7_MLP28 = np.squeeze(best_model28_vola7.predict(X_test_vola28))

    # Echo the forecasts for inspection.
    print("Predicciones de entrenamiento:", train_preds_vola7_MLP28)
    print("Predicciones de validación:", val_preds_vola7_MLP28)
    print("Predicciones de prueba:", test_preds_vola7_MLP28)
153/153 [==============================] - 0s 192us/step
1/1 [==============================] - 0s 8ms/step
1/1 [==============================] - 0s 9ms/step
Predicciones de entrenamiento: [0.00122369 0.00122369 0.00122369 ... 0.01869361 0.01823867 0.01843898]
Predicciones de validación: [0.01965564 0.0249837 0.02388054 0.02899083 0.03142117 0.02996834
0.03053402 0.02946956 0.03632463 0.03558132 0.02950555 0.02993879
0.04244857 0.04243865 0.04043846 0.03176515 0.03419735 0.03157211
0.02830457 0.02012498 0.02029579 0.02066696 0.02256245 0.02292681
0.0232622 0.02247726 0.02822454 0.02822946]
Predicciones de prueba: [0.02763509 0.01964802 0.02206122 0.02294054 0.02038712 0.01496809
0.01742661 0.01965767 0.01100218 0.01027017 0.01635244 0.0178361
0.02058693 0.01810741 0.01714929 0.01356719 0.01558089 0.01748828
0.01895088 0.01874853 0.02220165 0.02236787 0.01828931 0.01700567
0.009808 0.01106531 0.01060213 0.01302596]
A continuación se indexan los parámetros sobre la función data_plot() para un horizonte de 28 días (\(\tau = 28\)).
# Split the 7-day-volatility series into train/validation/test plotting series for a 28-day horizon.
data_train_plot_vola28, data_val_plot_vola28 , data_test_plot_vola28 = data_plot(df_1_st['Volatilidad_7'], 28)
Datos de entrenamiento:
4998 0.0000
4997 0.0000
4996 0.0000
4995 0.0000
4994 0.0000
...
60 0.0230
59 0.0201
58 0.0280
57 0.0281
56 0.0281
Name: Volatilidad_7, Length: 4943, dtype: float64
Datos de validación:
55 0.0186
54 0.0203
53 0.0215
52 0.0208
51 0.0135
50 0.0139
49 0.0146
48 0.0080
47 0.0084
46 0.0126
45 0.0139
44 0.0184
43 0.0168
42 0.0131
41 0.0123
40 0.0155
39 0.0172
38 0.0189
37 0.0171
36 0.0194
35 0.0194
34 0.0173
33 0.0167
32 0.0080
31 0.0088
30 0.0090
29 0.0114
28 0.0107
Name: Volatilidad_7, dtype: float64
Datos de prueba:
27 0.0229
26 0.0273
25 0.0393
24 0.0411
23 0.0380
22 0.0403
21 0.0391
20 0.0432
19 0.0563
18 0.0463
17 0.0444
16 0.0444
15 0.0438
14 0.0438
13 0.0360
12 0.0188
11 0.0171
10 0.0223
9 0.0256
8 0.0349
7 0.0402
6 0.0347
5 0.0444
4 0.0607
3 0.0613
2 0.0613
1 0.0573
0 0.0577
Name: Volatilidad_7, dtype: float64
# Plot the last 100 training points plus the validation/test forecasts of the
# 28-day-horizon MLP.
# BUG FIX: the original call referenced *_at28 / *_at_MLP28 variables that
# belong to another section; this section defines data_*_plot_vola28 (L above)
# and *_preds_vola7_MLP28, so those are the names used here.
plot_model(data_train_plot_vola28[-100:], data_val_plot_vola28, data_test_plot_vola28, val_preds_vola7_MLP28, test_preds_vola7_MLP28, "Predicciones usando Perceptrón Multicapa (MLP) para un horizonte de 28 días")
A continuación realizamos un análisis sobre los residuales y rendimiento de nuestro modelo con relación a nuestro conjunto de validación(rendimiento) y test (rendimiento y residuales).
Conjunto de datos de Entrenamiento
A continuación se realiza el análisis de los residuales para el conjunto de entrenamiento.
# Residual diagnostics on the training split: Ljung-Box (autocorrelation) and Jarque-Bera (normality) p-values.
ljung_box_pval_MLP_train28_vola7, jarque_bera_pval_MLP_train28_vola7 = diagnostic_plots(y_train_vola28, train_preds_vola7_MLP28)
Ljung-Box LB Statistic: 784.151303
Ljung-Box p-value: 0.000000
Se rechaza H0: hay autocorrelación en los residuales.
Jarque-Bera p-value: 0.000000
Se rechaza H0: los residuales no siguen una distribución normal.
# Compute the fit metrics (SSE, MAPE, MAD, MSD, R2) on the training split.
metrica_vola7_MLP_train28 = metricas(y_train_vola28,train_preds_vola7_MLP28)
# Replace the default integer index with a descriptive row label.
metrica_vola7_MLP_train28.index = metrica_vola7_MLP_train28.index.map({0: 'MLP Entrenamiento Volatilidad ω = 7 y τ = 28'})
# Append the residual-diagnostic p-values as extra columns of the same row.
metrica_vola7_MLP_train28['Ljung-Box p-value'] = pd.Series([ljung_box_pval_MLP_train28_vola7], index=metrica_vola7_MLP_train28.index)
metrica_vola7_MLP_train28['Jarque-Bera p-value'] = pd.Series([jarque_bera_pval_MLP_train28_vola7], index=metrica_vola7_MLP_train28.index)
# Display the metrics table.
metrica_vola7_MLP_train28
| SSE | MAPE | MAD | MSD | R2 | Ljung-Box p-value | Jarque-Bera p-value | |
|---|---|---|---|---|---|---|---|
| MLP Entrenamiento Volatilidad ω = 7 y τ = 28 | 3.5159 | 19.21% | 0.01 | 0.0 | 83.16% | 1.5062e-172 | 0.0 |
Conjunto de datos de Prueba:
# Residual diagnostics on the test split (Ljung-Box autocorrelation, Jarque-Bera normality).
ljung_box_pvalMLP28_vola7, jarque_bera_pvalMLP28_vola7 = evaluate_residuals(data_test_plot_vola28, test_preds_vola7_MLP28)
Se rechaza H0: hay autocorrelación en los residuales.
No se rechaza H0: los residuales siguen una distribución normal.
# Compute the fit metrics (SSE, MAPE, MAD, MSD, R2) on the test split.
metrica_MLP28_test_vola7 = metricas(y_test_vola28,test_preds_vola7_MLP28)
# Replace the default integer index with a descriptive row label.
metrica_MLP28_test_vola7.index = metrica_MLP28_test_vola7.index.map({0: 'MLP Prueba Volatilidad ω = 7 y τ = 28'})
# Append the residual-diagnostic p-values as extra columns of the same row.
metrica_MLP28_test_vola7['Ljung-Box p-value'] = pd.Series([ljung_box_pvalMLP28_vola7], index=metrica_MLP28_test_vola7.index)
metrica_MLP28_test_vola7['Jarque-Bera p-value'] = pd.Series([jarque_bera_pvalMLP28_vola7], index=metrica_MLP28_test_vola7.index)
# Display the metrics table.
metrica_MLP28_test_vola7
| SSE | MAPE | MAD | MSD | R2 | Ljung-Box p-value | Jarque-Bera p-value | |
|---|---|---|---|---|---|---|---|
| MLP Prueba Volatilidad ω = 7 y τ = 28 | 0.0005 | 26.09% | 0.0 | 0.0 | -6.44% | 2.7413e-05 | 0.6066 |
Curva Runs vs Error/Score :
# Plot the validation-loss curve across runs for the 28-day-horizon MLP histories.
plot_best_model_validation_loss(history_vola7_MPL28)
Boxplot Errores Conjunto de Entrenamiento, Validación y Prueba :
# Boxplots of the prediction errors on the train, validation and test splits.
errores_plots(y_train_vola28, train_preds_vola7_MLP28, y_val_vola28, val_preds_vola7_MLP28, y_test_vola28, test_preds_vola7_MLP28)
De acuerdo a los resultados obtenidos del gráfico de Run vs Error/Score y al compararlo con el gráfico boxplot de los residuales se observa lo siguiente:
Conjunto de Entrenamiento: La media de los residuales se encuentra muy cercana del error/score correspondiente al val_loss de la época del mejor modelo.
Conjunto de Validación: La media de los residuales se encuentra alejada del error/score correspondiente al val_loss de la época del mejor modelo.
Conjunto de Prueba: La media de los residuales se encuentra alejada del error/score correspondiente al val_loss de la época del mejor modelo. De igual forma se observa una alta variabilidad en los residuales.
Lo anterior es coherente con las métricas obtenidas para el modelo implementado.
Volatilidad ω = 7 (Volatilidad_7): Memoria a Corto y Largo Plazo (LSTM) #
Ya definimos los regresores (X) y la variable objetivo (y) para el proceso de entrenamiento y validación en la sección correspondiente al modelo Perceptrones Multicapa a través de la función create_time_series_datasets(), sin embargo, ésta se utiliza para generar arreglos 2D de forma (número de muestras, número de pasos de tiempo). Dado que la entrada a las capas de una RNN debe ser de forma: número de muestras, número de pasos de tiempo, número de características por paso de tiempo; procedemos con la definición de la función change_dimension_lstm() para realizar la transformación de 2D a 3D.
Horizonte de 7 días (\(\tau=7\))#
Indexación de parámetros para cambio de dimensión de X de 2D a 3D.
# Reshape the 2D regressor arrays (samples, timesteps) into the 3D form (samples, timesteps, features) required by LSTM layers.
X_train_vola7_lstm_7, X_val_vola7_lstm_7, X_test_vola7_lstm_7 = change_dimension_lstm(X_train_vola7, X_val_vola7, X_test_vola7)
Shape of 3D arrays X: (4971, 7, 1) (7, 7, 1) (7, 7, 1)
A continuación, se lleva a cabo la búsqueda de los hiperparámetros que optimizan nuestro modelo utilizando Keras y GridSearch.
from sklearn.metrics import make_scorer, mean_absolute_error

os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'  # Silence TensorFlow info/warning logs.

# Define the LSTM network architecture.
def create_lstm_model(optimizer, activation):
    """Build and compile the two-layer LSTM used by the grid search.

    Parameters
    ----------
    optimizer : str or keras optimizer
        Passed straight to ``compile`` (grid-searched).
    activation : str
        Activation for both LSTM layers (grid-searched).
    """
    input_layer_lstm = Input(shape=(7, 1), dtype='float32')
    # BUG FIX: the original accepted `activation` but never used it, so the
    # grid search over activations had no effect. Wire it into both LSTM
    # layers. The current grid only tries 'tanh' (the LSTM default), so the
    # reported results are unchanged.
    lstm_layer1 = LSTM(64, activation=activation, return_sequences=True)(input_layer_lstm)
    lstm_layer2 = LSTM(32, activation=activation, return_sequences=False)(lstm_layer1)
    dropout_layer_lstm = Dropout(0.2)(lstm_layer2)
    output_layer_lstm = Dense(1, activation='linear')(dropout_layer_lstm)
    ts_model_lstm = Model(inputs=input_layer_lstm, outputs=output_layer_lstm)
    ts_model_lstm.compile(loss='mean_absolute_error', optimizer=optimizer)
    return ts_model_lstm

# Hyperparameter grid (the wider search spaces are noted in comments).
param_grid = {'activation': ['tanh'],  # e.g. ['relu', 'tanh', 'sigmoid']
              'epochs': [20],          # e.g. [20, 50, 100, 150]
              'optimizer': ['SGD']     # e.g. ['SGD', 'RMSprop', 'Adam']
              }

# Grid-search configuration: 5-fold CV scored by mean absolute error.
# BUG FIX: GridSearchCV *maximizes* the scorer, so MAE must be declared a
# loss with greater_is_better=False — otherwise a grid with more than one
# candidate would select the WORST model. Scores are reported negated.
scoring = make_scorer(mean_absolute_error, greater_is_better=False)
model = KerasRegressor(build_fn=create_lstm_model, epochs=20, batch_size=16, verbose=0)
grid = GridSearchCV(estimator=model, param_grid=param_grid, cv=KFold(n_splits=5, shuffle=True), scoring=scoring, n_jobs=-1, verbose=2)
grid_result = grid.fit(X_train_vola7_lstm_7, y_train_vola7)

# Report the best hyperparameters found by the search.
print(f"Mejor función de activación: {grid_result.best_params_['activation']}")
print(f"Mejor número de epocas: {grid_result.best_params_['epochs']}")
print(f"Mejor Longitud de paso μ (optimizer): {grid_result.best_params_['optimizer']}")
print(f"Mejor Puntuación: {grid_result.best_score_}")
Fitting 5 folds for each of 1 candidates, totalling 5 fits
[CV] END ..........activation=tanh, epochs=20, optimizer=SGD; total time= 16.3s
[CV] END ..........activation=tanh, epochs=20, optimizer=SGD; total time= 16.3s
[CV] END ..........activation=tanh, epochs=20, optimizer=SGD; total time= 16.4s
[CV] END ..........activation=tanh, epochs=20, optimizer=SGD; total time= 16.4s
[CV] END ..........activation=tanh, epochs=20, optimizer=SGD; total time= 16.6s
Mejor función de activación: tanh
Mejor número de epocas: 20
Mejor Longitud de paso μ (optimizer): SGD
Mejor Puntuación: 0.02579855326376012
El modelo de predicción de series temporales se basa en una red neuronal recurrente que incluye una capa de entrada que se conecta a una capa LSTM. Esta capa LSTM considera 7 pasos temporales, equivalentes al número de datos históricos. La salida de la LSTM se genera únicamente a partir del último paso temporal. Cada uno de los n pasos temporales definidos en el shape (n,) cuenta con 32 neuronas ocultas.
La cantidad de pasos temporales (timesteps) y 1 indica el número de características (features) por cada paso temporal. Se utiliza return_sequences=True cuando es necesario enviar la secuencia completa de salidas a la capa siguiente, que puede ser otra capa recurrente, para este caso otra capa LSTM.
La salida de LSTM pasa a una capa de exclusión que elimina aleatoriamente el 20%, 40%, 60% y 80% de la entrada antes de pasar a la capa de salida, que tiene una única neurona oculta con una función de activación lineal
Indexación de parámetros de la función build_models_lstm con base en lo indicado en el numeral 3 del parcial práctico.
# Number of timesteps per sample (7-day lookback window).
input_shape7 = 7
# Grid of hidden-neuron counts and dropout rates required by item 3 of the exam.
neurons_list = [10, 100, 1000, 10000]
dropout_rates = [0.2, 0.4, 0.6, 0.8]
# Build one LSTM model per (neurons, dropout) combination, all using the SGD optimizer.
models_LSTM7_vola7 = build_models_lstm(input_shape7, neurons_list, dropout_rates ,'SGD')
Model: "model_341"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_342 (InputLayer) [(None, 7, 1)] 0
lstm_274 (LSTM) (None, 7, 64) 16896
lstm_275 (LSTM) (None, 32) 12416
dropout_341 (Dropout) (None, 32) 0
dense_953 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10 neuronas y dropout 0.2:
Model: "model_341"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_342 (InputLayer) [(None, 7, 1)] 0
lstm_274 (LSTM) (None, 7, 64) 16896
lstm_275 (LSTM) (None, 32) 12416
dropout_341 (Dropout) (None, 32) 0
dense_953 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_342"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_343 (InputLayer) [(None, 7, 1)] 0
lstm_276 (LSTM) (None, 7, 64) 16896
lstm_277 (LSTM) (None, 32) 12416
dropout_342 (Dropout) (None, 32) 0
dense_954 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10 neuronas y dropout 0.4:
Model: "model_342"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_343 (InputLayer) [(None, 7, 1)] 0
lstm_276 (LSTM) (None, 7, 64) 16896
lstm_277 (LSTM) (None, 32) 12416
dropout_342 (Dropout) (None, 32) 0
dense_954 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_343"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_344 (InputLayer) [(None, 7, 1)] 0
lstm_278 (LSTM) (None, 7, 64) 16896
lstm_279 (LSTM) (None, 32) 12416
dropout_343 (Dropout) (None, 32) 0
dense_955 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10 neuronas y dropout 0.6:
Model: "model_343"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_344 (InputLayer) [(None, 7, 1)] 0
lstm_278 (LSTM) (None, 7, 64) 16896
lstm_279 (LSTM) (None, 32) 12416
dropout_343 (Dropout) (None, 32) 0
dense_955 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_344"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_345 (InputLayer) [(None, 7, 1)] 0
lstm_280 (LSTM) (None, 7, 64) 16896
lstm_281 (LSTM) (None, 32) 12416
dropout_344 (Dropout) (None, 32) 0
dense_956 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10 neuronas y dropout 0.8:
Model: "model_344"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_345 (InputLayer) [(None, 7, 1)] 0
lstm_280 (LSTM) (None, 7, 64) 16896
lstm_281 (LSTM) (None, 32) 12416
dropout_344 (Dropout) (None, 32) 0
dense_956 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_345"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_346 (InputLayer) [(None, 7, 1)] 0
lstm_282 (LSTM) (None, 7, 64) 16896
lstm_283 (LSTM) (None, 32) 12416
dropout_345 (Dropout) (None, 32) 0
dense_957 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 100 neuronas y dropout 0.2:
Model: "model_345"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_346 (InputLayer) [(None, 7, 1)] 0
lstm_282 (LSTM) (None, 7, 64) 16896
lstm_283 (LSTM) (None, 32) 12416
dropout_345 (Dropout) (None, 32) 0
dense_957 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_346"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_347 (InputLayer) [(None, 7, 1)] 0
lstm_284 (LSTM) (None, 7, 64) 16896
lstm_285 (LSTM) (None, 32) 12416
dropout_346 (Dropout) (None, 32) 0
dense_958 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 100 neuronas y dropout 0.4:
Model: "model_346"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_347 (InputLayer) [(None, 7, 1)] 0
lstm_284 (LSTM) (None, 7, 64) 16896
lstm_285 (LSTM) (None, 32) 12416
dropout_346 (Dropout) (None, 32) 0
dense_958 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_347"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_348 (InputLayer) [(None, 7, 1)] 0
lstm_286 (LSTM) (None, 7, 64) 16896
lstm_287 (LSTM) (None, 32) 12416
dropout_347 (Dropout) (None, 32) 0
dense_959 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 100 neuronas y dropout 0.6:
Model: "model_347"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_348 (InputLayer) [(None, 7, 1)] 0
lstm_286 (LSTM) (None, 7, 64) 16896
lstm_287 (LSTM) (None, 32) 12416
dropout_347 (Dropout) (None, 32) 0
dense_959 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_348"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_349 (InputLayer) [(None, 7, 1)] 0
lstm_288 (LSTM) (None, 7, 64) 16896
lstm_289 (LSTM) (None, 32) 12416
dropout_348 (Dropout) (None, 32) 0
dense_960 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 100 neuronas y dropout 0.8:
Model: "model_348"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_349 (InputLayer) [(None, 7, 1)] 0
lstm_288 (LSTM) (None, 7, 64) 16896
lstm_289 (LSTM) (None, 32) 12416
dropout_348 (Dropout) (None, 32) 0
dense_960 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_349"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_350 (InputLayer) [(None, 7, 1)] 0
lstm_290 (LSTM) (None, 7, 64) 16896
lstm_291 (LSTM) (None, 32) 12416
dropout_349 (Dropout) (None, 32) 0
dense_961 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 1000 neuronas y dropout 0.2:
Model: "model_349"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_350 (InputLayer) [(None, 7, 1)] 0
lstm_290 (LSTM) (None, 7, 64) 16896
lstm_291 (LSTM) (None, 32) 12416
dropout_349 (Dropout) (None, 32) 0
dense_961 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_350"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_351 (InputLayer) [(None, 7, 1)] 0
lstm_292 (LSTM) (None, 7, 64) 16896
lstm_293 (LSTM) (None, 32) 12416
dropout_350 (Dropout) (None, 32) 0
dense_962 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 1000 neuronas y dropout 0.4:
Model: "model_350"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_351 (InputLayer) [(None, 7, 1)] 0
lstm_292 (LSTM) (None, 7, 64) 16896
lstm_293 (LSTM) (None, 32) 12416
dropout_350 (Dropout) (None, 32) 0
dense_962 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_351"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_352 (InputLayer) [(None, 7, 1)] 0
lstm_294 (LSTM) (None, 7, 64) 16896
lstm_295 (LSTM) (None, 32) 12416
dropout_351 (Dropout) (None, 32) 0
dense_963 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 1000 neuronas y dropout 0.6:
Model: "model_351"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_352 (InputLayer) [(None, 7, 1)] 0
lstm_294 (LSTM) (None, 7, 64) 16896
lstm_295 (LSTM) (None, 32) 12416
dropout_351 (Dropout) (None, 32) 0
dense_963 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_352"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_353 (InputLayer) [(None, 7, 1)] 0
lstm_296 (LSTM) (None, 7, 64) 16896
lstm_297 (LSTM) (None, 32) 12416
dropout_352 (Dropout) (None, 32) 0
dense_964 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 1000 neuronas y dropout 0.8:
Model: "model_352"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_353 (InputLayer) [(None, 7, 1)] 0
lstm_296 (LSTM) (None, 7, 64) 16896
lstm_297 (LSTM) (None, 32) 12416
dropout_352 (Dropout) (None, 32) 0
dense_964 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_353"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_354 (InputLayer) [(None, 7, 1)] 0
lstm_298 (LSTM) (None, 7, 64) 16896
lstm_299 (LSTM) (None, 32) 12416
dropout_353 (Dropout) (None, 32) 0
dense_965 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10000 neuronas y dropout 0.2:
Model: "model_353"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_354 (InputLayer) [(None, 7, 1)] 0
lstm_298 (LSTM) (None, 7, 64) 16896
lstm_299 (LSTM) (None, 32) 12416
dropout_353 (Dropout) (None, 32) 0
dense_965 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_354"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_355 (InputLayer) [(None, 7, 1)] 0
lstm_300 (LSTM) (None, 7, 64) 16896
lstm_301 (LSTM) (None, 32) 12416
dropout_354 (Dropout) (None, 32) 0
dense_966 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10000 neuronas y dropout 0.4:
Model: "model_354"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_355 (InputLayer) [(None, 7, 1)] 0
lstm_300 (LSTM) (None, 7, 64) 16896
lstm_301 (LSTM) (None, 32) 12416
dropout_354 (Dropout) (None, 32) 0
dense_966 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_355"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_356 (InputLayer) [(None, 7, 1)] 0
lstm_302 (LSTM) (None, 7, 64) 16896
lstm_303 (LSTM) (None, 32) 12416
dropout_355 (Dropout) (None, 32) 0
dense_967 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10000 neuronas y dropout 0.6:
Model: "model_355"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_356 (InputLayer) [(None, 7, 1)] 0
lstm_302 (LSTM) (None, 7, 64) 16896
lstm_303 (LSTM) (None, 32) 12416
dropout_355 (Dropout) (None, 32) 0
dense_967 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_356"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_357 (InputLayer) [(None, 7, 1)] 0
lstm_304 (LSTM) (None, 7, 64) 16896
lstm_305 (LSTM) (None, 32) 12416
dropout_356 (Dropout) (None, 32) 0
dense_968 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10000 neuronas y dropout 0.8:
Model: "model_356"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_357 (InputLayer) [(None, 7, 1)] 0
lstm_304 (LSTM) (None, 7, 64) 16896
lstm_305 (LSTM) (None, 32) 12416
dropout_356 (Dropout) (None, 32) 0
dense_968 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Para entrenar el modelo, se utiliza la función fit() en el objeto del modelo, proporcionándole como argumentos X_train y y_train. El proceso de entrenamiento se lleva a cabo a lo largo de un número específico de épocas. Además, el parámetro batch_size especifica cuántas muestras del conjunto de entrenamiento se usarán en cada iteración de retropropagación (backpropagation).
El conjunto de datos de validación se utiliza para evaluar el rendimiento del modelo al finalizar cada época. Se emplea un objeto ModelCheckpoint que monitorea la función de pérdida en el conjunto de validación y almacena el modelo correspondiente a la época en la que esta función alcanza su valor más bajo.
Se define val_loss como el valor de la función de coste para los datos de validación cruzada y loss como el valor de la función de coste para los datos de entrenamiento. period=1 indica que el modelo se evaluará y potencialmente se guardará después de cada época.
from tensorflow.keras.callbacks import ModelCheckpoint

# Checkpoint path template: the epoch number and the validation loss are
# embedded in the filename so the best epoch can be identified later just by
# scanning the directory.
save_weights = os.path.join(
    'keras_models',
    'PRSA_data_vola7_LSTM_weights.{epoch:02d}-{val_loss:.4f}.keras',
)

# Evaluate once per epoch and keep only the model with the lowest
# validation loss seen so far (full model, not just the weights).
save_best7_lstm_vola7 = ModelCheckpoint(
    save_weights,
    monitor='val_loss',
    verbose=2,
    save_best_only=True,
    save_weights_only=False,
    mode='min',
    save_freq='epoch',
)
A continuación se itera sobre cada uno de los modelos del objeto models_LSTM7.
import os
from joblib import dump, load

# Train each candidate model once and cache its training history on disk,
# so re-running the notebook skips the expensive fit() calls.
history_vola7_LSTM7 = []

# Iterate over every model in the candidate list for the 7-day horizon.
for i, model in enumerate(models_LSTM7_vola7):
    filename = f'history_vola7_LSTM_model_{i}.joblib'
    if os.path.exists(filename):
        # Cached history exists: load it instead of retraining.
        model_history = load(filename)
        # BUG FIX: the message printed the literal '(unknown)' instead of the
        # actual cache filename (the cell output shows real filenames).
        print(f"El archivo '{filename}' ya existe. Se ha cargado el historial del entrenamiento.")
    else:
        model_history = model.fit(x=X_train_vola7_lstm_7, y=y_train_vola7, batch_size=16, epochs=20,
                                  verbose=2, callbacks=[save_best7_lstm_vola7],
                                  validation_data=(X_val_vola7_lstm_7, y_val_vola7),
                                  shuffle=True)
        dump(model_history.history, filename)
        print(f"El entrenamiento del modelo {i + 1} se ha completado y el historial ha sido guardado en '{filename}'.")
    # Normalize before storing: cached entries are plain dicts, fresh fits
    # are Keras History objects whose .history attribute holds the dict.
    history_vola7_LSTM7.append(model_history if isinstance(model_history, dict) else model_history.history)
El archivo 'history_vola7_LSTM_model_0.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola7_LSTM_model_1.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola7_LSTM_model_2.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola7_LSTM_model_3.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola7_LSTM_model_4.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola7_LSTM_model_5.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola7_LSTM_model_6.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola7_LSTM_model_7.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola7_LSTM_model_8.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola7_LSTM_model_9.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola7_LSTM_model_10.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola7_LSTM_model_11.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola7_LSTM_model_12.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola7_LSTM_model_13.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola7_LSTM_model_14.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola7_LSTM_model_15.joblib' ya existe. Se ha cargado el historial del entrenamiento.
En este caso, el parámetro verbose=2 indica que se mostrará una línea por cada época durante el entrenamiento. Los valores posibles son: 0 para no mostrar información, 1 para visualizar una barra de progreso y 2 para ver un registro por época.
Además, las muestras se mezclan de manera aleatoria antes de cada época, gracias a la opción shuffle=True.
Una vez completado el entrenamiento, se realizan predicciones sobre el Precio del Bitcoin utilizando los mejores modelos guardados. Las predicciones generadas, que corresponden al Precio del Bitcoin escalado, son transformadas inversamente para recuperar los valores originales. Asimismo, se evalúa la calidad del ajuste mediante el uso de métricas de medición como SSE, MAPE, MAD, MSD y R2.
import os
import re
from tensorflow.keras.models import load_model
import numpy as np

# Directory holding the checkpoints written by ModelCheckpoint.
model_dir = 'keras_models'
files = os.listdir(model_dir)
# Checkpoint filenames encode the epoch and the validation loss:
#   PRSA_data_vola7_LSTM_weights.<epoch>-<val_loss>.keras
pattern = r"PRSA_data_vola7_LSTM_weights\.(\d+)-([\d\.]+)\.keras"

best_val_loss = float('inf')
best_model_file = None
best7_lstm_vola7 = None

# Scan the directory and keep the checkpoint with the lowest val_loss.
for file in files:
    match = re.match(pattern, file)
    if match:
        # group(2) is the val_loss embedded in the filename. The epoch in
        # group(1) is not needed for the selection, so it is not parsed
        # (the original bound it to an unused local).
        val_loss = float(match.group(2))
        if val_loss < best_val_loss:
            best_val_loss = val_loss
            best_model_file = file

# Load the best checkpoint, if any filename matched the pattern.
if best_model_file:
    best_model_path = os.path.join(model_dir, best_model_file)
    print(f"Cargando el mejor modelo: {best_model_file} con val_loss: {best_val_loss}")
    best7_lstm_vola7 = load_model(best_model_path)
    if best7_lstm_vola7 is None:
        print("Error: No se pudo cargar el mejor modelo.")
else:
    print("No se encontraron archivos de modelos que coincidan con el patrón.")
Cargando el mejor modelo: PRSA_data_vola7_LSTM_weights.02-0.0038.keras con val_loss: 0.0038
A continuación estimamos las predicciones del modelo LSTM para la época en donde la función de pérdida alcanza su valor más bajo.
# Predictions with the best checkpointed model (if one could be loaded).
if best7_lstm_vola7 is None:
    print("No se puede realizar predicciones porque el mejor modelo no se ha cargado.")
else:
    # Predict on the train / validation / test splits in that order.
    raw_preds = (
        best7_lstm_vola7.predict(X_train_vola7_lstm_7),
        best7_lstm_vola7.predict(X_val_vola7_lstm_7),
        best7_lstm_vola7.predict(X_test_vola7_lstm_7),
    )
    # Collapse the trailing singleton dimension produced by the Dense(1) head.
    train_preds_vola7_LSTM7, val_preds_vola7_LSTM7, test_preds_vola7_LSTM7 = (
        np.squeeze(p) for p in raw_preds
    )
    # Show the (still scaled) predictions for each split.
    print("Predicciones de Entrenamiento:", train_preds_vola7_LSTM7)
    print("Predicciones de validación:", val_preds_vola7_LSTM7)
    print("Predicciones de prueba:", test_preds_vola7_LSTM7)
156/156 [==============================] - 1s 870us/step
1/1 [==============================] - 0s 8ms/step
1/1 [==============================] - 0s 8ms/step
Predicciones de Entrenamiento: [0.0060063 0.0060063 0.0060063 ... 0.04061764 0.04177225 0.04151653]
Predicciones de validación: [0.04008937 0.04065327 0.04671964 0.04801904 0.04625323 0.04403771
0.04243068]
Predicciones de prueba: [0.04162169 0.0383867 0.02928428 0.02127487 0.01943525 0.02195254
0.02877884]
plot_model(data_train_plot_vola7[-100:], data_val_plot_vola7, data_test_plot_vola7, val_preds_vola7_LSTM7, test_preds_vola7_LSTM7, "Predicciones usando Memoria a Corto y Largo Paso (LSTM) para un horizonte de 7 días")
A continuación realizamos un análisis sobre los residuales y rendimiento de nuestro modelo con relación a nuestro conjunto de Entrenamiento(rendimiento) y test (rendimiento y residuales).
Conjunto de datos de Entrenamiento
A continuación se realiza el análisis de los residuales para el conjunto de entrenamiento.
ljung_box_pval_LSTM_train7_vola7, jarque_bera_pval_LSTM_train7_vola7 = diagnostic_plots(y_train_vola7, train_preds_vola7_LSTM7)
Ljung-Box LB Statistic: 927.516572
Ljung-Box p-value: 0.000000
Se rechaza H0: hay autocorrelación en los residuales.
Jarque-Bera p-value: 0.000000
Se rechaza H0: los residuales no siguen una distribución normal.
# Fit metrics for the training split, labelled and augmented with the
# residual-diagnostic p-values computed above.
metrica_vola7_LSTM_train = metricas(y_train_vola7, train_preds_vola7_LSTM7)
metrica_vola7_LSTM_train.index = metrica_vola7_LSTM_train.index.map(
    {0: 'LSTM Entrenamiento Volatilidad ω = 7 y τ = 7'}
)
for col_name, p_value in (
    ('Ljung-Box p-value', ljung_box_pval_LSTM_train7_vola7),
    ('Jarque-Bera p-value', jarque_bera_pval_LSTM_train7_vola7),
):
    metrica_vola7_LSTM_train[col_name] = pd.Series([p_value], index=metrica_vola7_LSTM_train.index)
metrica_vola7_LSTM_train
| SSE | MAPE | MAD | MSD | R2 | Ljung-Box p-value | Jarque-Bera p-value | |
|---|---|---|---|---|---|---|---|
| LSTM Entrenamiento Volatilidad ω = 7 y τ = 7 | 3.7767 | 25.3% | 0.01 | 0.0 | 81.96% | 1.0236e-203 | 0.0 |
Conjunto de datos de Prueba:
ljung_box_pvalLSTM7_vola7, jarque_bera_pvalLSTM7_vola7 = evaluate_residuals(data_test_plot_vola7, test_preds_vola7_LSTM7)
No se rechaza H0: los residuales son independientes (no correlacionados).
No se rechaza H0: los residuales siguen una distribución normal.
# Fit metrics for the test split, labelled and augmented with the
# residual-diagnostic p-values computed above.
metrica_LSTM_test_vola7 = metricas(y_test_vola7, test_preds_vola7_LSTM7)
metrica_LSTM_test_vola7.index = metrica_LSTM_test_vola7.index.map(
    {0: 'LSTM Prueba Volatilidad ω = 7 y τ = 7'}
)
for col_name, p_value in (
    ('Ljung-Box p-value', ljung_box_pvalLSTM7_vola7),
    ('Jarque-Bera p-value', jarque_bera_pvalLSTM7_vola7),
):
    metrica_LSTM_test_vola7[col_name] = pd.Series([p_value], index=metrica_LSTM_test_vola7.index)
metrica_LSTM_test_vola7
| SSE | MAPE | MAD | MSD | R2 | Ljung-Box p-value | Jarque-Bera p-value | |
|---|---|---|---|---|---|---|---|
| LSTM Prueba Volatilidad ω = 7 y τ = 7 | 0.0009 | 40.63% | 0.01 | 0.0 | -79.71% | 0.1 | 0.5787 |
Curva Runs vs Error/Score :
plot_best_model_validation_loss(history_vola7_LSTM7)
Boxplot Errores Conjunto de Entrenamiento, Validación y Prueba :
errores_plots(y_train_vola7, train_preds_vola7_LSTM7, y_val_vola7, val_preds_vola7_LSTM7, y_test_vola7, test_preds_vola7_LSTM7)
De acuerdo a los resultados obtenidos del gráfico de Run vs Error/Score y al compararlo con el gráfico boxplot de los residuales se observa lo siguiente:
Conjunto de Entrenamiento: La media de los residuales se encuentra alejada del error/score correspondiente al val_loss de la época del mejor modelo.
Conjunto de Validación: La media de los residuales se encuentra cercana al error/score correspondiente al val_loss de la época del mejor modelo.
Conjunto de Prueba: La media de los residuales se encuentra alejada del error/score correspondiente al val_loss de la época del mejor modelo.
Lo anterior es coherente con las métricas obtenidas para el modelo implementado.
Horizonte de 14 días (\(\tau=14\))#
Indexación de parámetros para cambio de dimensión de X de 2D a 3D.
X_train_vola7_lstm_14, X_val_vola7_lstm_14, X_test_vola7_lstm_14 = change_dimension_lstm(X_train_vola14, X_val_vola14, X_test_vola14)
Shape of 3D arrays X: (4943, 14, 1) (14, 14, 1) (14, 14, 1)
A continuación, se lleva a cabo la búsqueda de los hiperparámetros que optimizan nuestro modelo utilizando Keras y GridSearch.
from sklearn.metrics import make_scorer, mean_absolute_error

os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'  # silence TF info/warning logs


def create_lstm_model(optimizer, activation):
    """Build and compile the two-layer LSTM regressor used by the grid search.

    Parameters
    ----------
    optimizer : str or keras optimizer
        Optimizer passed to ``compile``.
    activation : str
        Activation applied to the LSTM hidden layers.

    Returns
    -------
    A compiled Keras ``Model`` with input shape (14, 1) and one linear output.
    """
    input_layer_lstm = Input(shape=(14, 1), dtype='float32')
    # BUG FIX: `activation` was accepted but never used, which made the grid
    # search over activation functions meaningless. Apply it to the LSTM layers.
    lstm_layer1 = LSTM(64, activation=activation, return_sequences=True)(input_layer_lstm)
    lstm_layer2 = LSTM(32, activation=activation, return_sequences=False)(lstm_layer1)
    dropout_layer_lstm = Dropout(0.2)(lstm_layer2)
    # Linear output head: this is a regression model.
    output_layer_lstm = Dense(1, activation='linear')(dropout_layer_lstm)
    ts_model_lstm = Model(inputs=input_layer_lstm, outputs=output_layer_lstm)
    ts_model_lstm.compile(loss='mean_absolute_error', optimizer=optimizer)
    return ts_model_lstm


# Hyperparameter grid (reduced for runtime; fuller grids kept as comments).
param_grid = {'activation': ['relu'],  # e.g. ['relu', 'tanh', 'sigmoid']
              'epochs': [20],          # e.g. [20, 50, 100, 150]
              'optimizer': ['SGD']     # e.g. ['SGD', 'RMSprop', 'Adam']
              }

# Grid-search configuration: 5-fold CV scored by MAE.
# BUG FIX: make_scorer defaults to greater_is_better=True, so GridSearchCV
# would have MAXIMIZED the MAE and picked the worst candidate once the grid
# contained more than one combination. MAE must be minimized.
scoring = make_scorer(mean_absolute_error, greater_is_better=False)
model = KerasRegressor(build_fn=create_lstm_model, epochs=10, batch_size=16, verbose=0)
grid = GridSearchCV(estimator=model, param_grid=param_grid, cv=KFold(n_splits=5, shuffle=True),
                    scoring=scoring, n_jobs=-1, verbose=2)
grid_result = grid.fit(X_train_vola7_lstm_14, y_train_vola14)

# Report the best hyperparameters found (best_score_ is the negated MAE).
print(f"Mejor función de activación: {grid_result.best_params_['activation']}")
print(f"Mejor número de epocas: {grid_result.best_params_['epochs']}")
print(f"Mejor Longitud de paso μ (optimizer): {grid_result.best_params_['optimizer']}")
print(f"Mejor Puntuación: {grid_result.best_score_}")
Fitting 5 folds for each of 1 candidates, totalling 5 fits
[CV] END ..........activation=relu, epochs=20, optimizer=SGD; total time= 33.7s
[CV] END ..........activation=relu, epochs=20, optimizer=SGD; total time= 33.8s
[CV] END ..........activation=relu, epochs=20, optimizer=SGD; total time= 34.3s
[CV] END ..........activation=relu, epochs=20, optimizer=SGD; total time= 34.3s
[CV] END ..........activation=relu, epochs=20, optimizer=SGD; total time= 34.4s
Mejor función de activación: relu
Mejor número de epocas: 20
Mejor Longitud de paso μ (optimizer): SGD
Mejor Puntuación: 0.02838744101120887
El modelo de predicción de series temporales se basa en una red neuronal recurrente que incluye una capa de entrada que se conecta a una capa LSTM. Esta capa LSTM considera 14 pasos temporales, equivalentes al número de datos históricos. La salida de la LSTM se genera únicamente a partir del último paso temporal. Cada uno de los n pasos temporales definidos en el shape (n,) cuenta con 32 neuronas ocultas.
La cantidad de pasos temporales (timesteps) y 1 indica el número de características (features) por cada paso temporal. Se utiliza return_sequences=True cuando es necesario enviar la secuencia completa de salidas a la capa siguiente, que puede ser otra capa recurrente, para este caso otra capa LSTM.
La salida de LSTM pasa a una capa de exclusión que elimina aleatoriamente el 20%, 40%, 60% y 80% de la entrada antes de pasar a la capa de salida, que tiene una única neurona oculta con una función de activación lineal
La indexación de parámetros de la función build_models_lstm se realiza con base en lo indicado en el numeral 3 del parcial práctico.
# Build the grid of candidate architectures for the 14-day horizon: one model
# per (neurons, dropout) combination, using the SGD optimizer selected above.
# NOTE(review): the printed summaries below show identical parameter counts
# (29,345) for every "neurons" setting — confirm build_models_lstm actually
# uses neurons_list when constructing the layers.
input_shape14 = 14
neurons_list = [10, 100, 1000, 10000]
dropout_rates = [0.2, 0.4, 0.6, 0.8]
models_LSTM14_vola7 = build_models_lstm(input_shape14, neurons_list, dropout_rates, 'SGD')
Model: "model_358"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_359 (InputLayer) [(None, 14, 1)] 0
lstm_308 (LSTM) (None, 14, 64) 16896
lstm_309 (LSTM) (None, 32) 12416
dropout_358 (Dropout) (None, 32) 0
dense_970 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10 neuronas y dropout 0.2:
Model: "model_358"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_359 (InputLayer) [(None, 14, 1)] 0
lstm_308 (LSTM) (None, 14, 64) 16896
lstm_309 (LSTM) (None, 32) 12416
dropout_358 (Dropout) (None, 32) 0
dense_970 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_359"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_360 (InputLayer) [(None, 14, 1)] 0
lstm_310 (LSTM) (None, 14, 64) 16896
lstm_311 (LSTM) (None, 32) 12416
dropout_359 (Dropout) (None, 32) 0
dense_971 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10 neuronas y dropout 0.4:
Model: "model_359"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_360 (InputLayer) [(None, 14, 1)] 0
lstm_310 (LSTM) (None, 14, 64) 16896
lstm_311 (LSTM) (None, 32) 12416
dropout_359 (Dropout) (None, 32) 0
dense_971 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_360"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_361 (InputLayer) [(None, 14, 1)] 0
lstm_312 (LSTM) (None, 14, 64) 16896
lstm_313 (LSTM) (None, 32) 12416
dropout_360 (Dropout) (None, 32) 0
dense_972 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10 neuronas y dropout 0.6:
Model: "model_360"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_361 (InputLayer) [(None, 14, 1)] 0
lstm_312 (LSTM) (None, 14, 64) 16896
lstm_313 (LSTM) (None, 32) 12416
dropout_360 (Dropout) (None, 32) 0
dense_972 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_361"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_362 (InputLayer) [(None, 14, 1)] 0
lstm_314 (LSTM) (None, 14, 64) 16896
lstm_315 (LSTM) (None, 32) 12416
dropout_361 (Dropout) (None, 32) 0
dense_973 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10 neuronas y dropout 0.8:
Model: "model_361"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_362 (InputLayer) [(None, 14, 1)] 0
lstm_314 (LSTM) (None, 14, 64) 16896
lstm_315 (LSTM) (None, 32) 12416
dropout_361 (Dropout) (None, 32) 0
dense_973 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_362"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_363 (InputLayer) [(None, 14, 1)] 0
lstm_316 (LSTM) (None, 14, 64) 16896
lstm_317 (LSTM) (None, 32) 12416
dropout_362 (Dropout) (None, 32) 0
dense_974 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 100 neuronas y dropout 0.2:
Model: "model_362"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_363 (InputLayer) [(None, 14, 1)] 0
lstm_316 (LSTM) (None, 14, 64) 16896
lstm_317 (LSTM) (None, 32) 12416
dropout_362 (Dropout) (None, 32) 0
dense_974 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_363"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_364 (InputLayer) [(None, 14, 1)] 0
lstm_318 (LSTM) (None, 14, 64) 16896
lstm_319 (LSTM) (None, 32) 12416
dropout_363 (Dropout) (None, 32) 0
dense_975 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 100 neuronas y dropout 0.4:
Model: "model_363"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_364 (InputLayer) [(None, 14, 1)] 0
lstm_318 (LSTM) (None, 14, 64) 16896
lstm_319 (LSTM) (None, 32) 12416
dropout_363 (Dropout) (None, 32) 0
dense_975 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_364"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_365 (InputLayer) [(None, 14, 1)] 0
lstm_320 (LSTM) (None, 14, 64) 16896
lstm_321 (LSTM) (None, 32) 12416
dropout_364 (Dropout) (None, 32) 0
dense_976 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 100 neuronas y dropout 0.6:
Model: "model_364"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_365 (InputLayer) [(None, 14, 1)] 0
lstm_320 (LSTM) (None, 14, 64) 16896
lstm_321 (LSTM) (None, 32) 12416
dropout_364 (Dropout) (None, 32) 0
dense_976 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_365"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_366 (InputLayer) [(None, 14, 1)] 0
lstm_322 (LSTM) (None, 14, 64) 16896
lstm_323 (LSTM) (None, 32) 12416
dropout_365 (Dropout) (None, 32) 0
dense_977 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 100 neuronas y dropout 0.8:
Model: "model_365"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_366 (InputLayer) [(None, 14, 1)] 0
lstm_322 (LSTM) (None, 14, 64) 16896
lstm_323 (LSTM) (None, 32) 12416
dropout_365 (Dropout) (None, 32) 0
dense_977 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_366"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_367 (InputLayer) [(None, 14, 1)] 0
lstm_324 (LSTM) (None, 14, 64) 16896
lstm_325 (LSTM) (None, 32) 12416
dropout_366 (Dropout) (None, 32) 0
dense_978 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 1000 neuronas y dropout 0.2:
Model: "model_366"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_367 (InputLayer) [(None, 14, 1)] 0
lstm_324 (LSTM) (None, 14, 64) 16896
lstm_325 (LSTM) (None, 32) 12416
dropout_366 (Dropout) (None, 32) 0
dense_978 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_367"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_368 (InputLayer) [(None, 14, 1)] 0
lstm_326 (LSTM) (None, 14, 64) 16896
lstm_327 (LSTM) (None, 32) 12416
dropout_367 (Dropout) (None, 32) 0
dense_979 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 1000 neuronas y dropout 0.4:
Model: "model_367"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_368 (InputLayer) [(None, 14, 1)] 0
lstm_326 (LSTM) (None, 14, 64) 16896
lstm_327 (LSTM) (None, 32) 12416
dropout_367 (Dropout) (None, 32) 0
dense_979 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_368"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_369 (InputLayer) [(None, 14, 1)] 0
lstm_328 (LSTM) (None, 14, 64) 16896
lstm_329 (LSTM) (None, 32) 12416
dropout_368 (Dropout) (None, 32) 0
dense_980 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 1000 neuronas y dropout 0.6:
Model: "model_368"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_369 (InputLayer) [(None, 14, 1)] 0
lstm_328 (LSTM) (None, 14, 64) 16896
lstm_329 (LSTM) (None, 32) 12416
dropout_368 (Dropout) (None, 32) 0
dense_980 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_369"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_370 (InputLayer) [(None, 14, 1)] 0
lstm_330 (LSTM) (None, 14, 64) 16896
lstm_331 (LSTM) (None, 32) 12416
dropout_369 (Dropout) (None, 32) 0
dense_981 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 1000 neuronas y dropout 0.8:
Model: "model_369"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_370 (InputLayer) [(None, 14, 1)] 0
lstm_330 (LSTM) (None, 14, 64) 16896
lstm_331 (LSTM) (None, 32) 12416
dropout_369 (Dropout) (None, 32) 0
dense_981 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_370"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_371 (InputLayer) [(None, 14, 1)] 0
lstm_332 (LSTM) (None, 14, 64) 16896
lstm_333 (LSTM) (None, 32) 12416
dropout_370 (Dropout) (None, 32) 0
dense_982 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10000 neuronas y dropout 0.2:
Model: "model_370"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_371 (InputLayer) [(None, 14, 1)] 0
lstm_332 (LSTM) (None, 14, 64) 16896
lstm_333 (LSTM) (None, 32) 12416
dropout_370 (Dropout) (None, 32) 0
dense_982 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_371"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_372 (InputLayer) [(None, 14, 1)] 0
lstm_334 (LSTM) (None, 14, 64) 16896
lstm_335 (LSTM) (None, 32) 12416
dropout_371 (Dropout) (None, 32) 0
dense_983 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10000 neuronas y dropout 0.4:
Model: "model_371"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_372 (InputLayer) [(None, 14, 1)] 0
lstm_334 (LSTM) (None, 14, 64) 16896
lstm_335 (LSTM) (None, 32) 12416
dropout_371 (Dropout) (None, 32) 0
dense_983 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_372"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_373 (InputLayer) [(None, 14, 1)] 0
lstm_336 (LSTM) (None, 14, 64) 16896
lstm_337 (LSTM) (None, 32) 12416
dropout_372 (Dropout) (None, 32) 0
dense_984 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10000 neuronas y dropout 0.6:
Model: "model_372"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_373 (InputLayer) [(None, 14, 1)] 0
lstm_336 (LSTM) (None, 14, 64) 16896
lstm_337 (LSTM) (None, 32) 12416
dropout_372 (Dropout) (None, 32) 0
dense_984 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_373"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_374 (InputLayer) [(None, 14, 1)] 0
lstm_338 (LSTM) (None, 14, 64) 16896
lstm_339 (LSTM) (None, 32) 12416
dropout_373 (Dropout) (None, 32) 0
dense_985 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10000 neuronas y dropout 0.8:
Model: "model_373"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_374 (InputLayer) [(None, 14, 1)] 0
lstm_338 (LSTM) (None, 14, 64) 16896
lstm_339 (LSTM) (None, 32) 12416
dropout_373 (Dropout) (None, 32) 0
dense_985 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Para entrenar el modelo, se utiliza la función fit() en el objeto del modelo, proporcionándole como argumentos X_train y y_train. El proceso de entrenamiento se lleva a cabo a lo largo de un número específico de épocas. Además, el parámetro batch_size especifica cuántas muestras del conjunto de entrenamiento se usarán en cada iteración de retropropagación (backpropagation).
El conjunto de datos de validación se utiliza para evaluar el rendimiento del modelo al finalizar cada época. Se emplea un objeto ModelCheckpoint que monitorea la función de pérdida en el conjunto de validación y almacena el modelo correspondiente a la época en la que esta función alcanza su valor más bajo.
Se define val_loss como el valor de la función de coste para los datos de validación cruzada y loss como el valor de la función de coste para los datos de entrenamiento. period=1 indica que el modelo se evaluará y potencialmente se guardará después de cada época.
from tensorflow.keras.callbacks import ModelCheckpoint

# Checkpoint template: epoch number and validation loss are baked into the
# file name so the best epoch can be recovered later from the directory.
save_weights = os.path.join(
    'keras_models',
    'PRSA_data_vola7_LSTM14_weights.{epoch:02d}-{val_loss:.4f}.keras',
)
# Keep only the weights of the epoch with the lowest validation loss.
save_best14_lstm_vola7 = ModelCheckpoint(
    filepath=save_weights,
    monitor='val_loss',
    mode='min',
    verbose=2,
    save_best_only=True,
    save_weights_only=False,
    save_freq='epoch',
)
A continuación se itera sobre cada uno de los modelos del objeto models_LSTM14_vola7.
import os
from joblib import dump, load

history_vola7_LSTM14 = []
# Train each candidate LSTM (14-day horizon), or reload a previously saved
# training history if one exists on disk.
for i, model in enumerate(models_LSTM14_vola7):
    filename = f'history_vola7_LSTM14_model_{i}.joblib'
    if os.path.exists(filename):
        model_history = load(filename)
        # BUG FIX: the message used to print the literal '(unknown)' instead
        # of interpolating the actual file name.
        print(f"El archivo '{filename}' ya existe. Se ha cargado el historial del entrenamiento.")
    else:
        model_history = model.fit(x=X_train_vola7_lstm_14, y=y_train_vola14, batch_size=16, epochs=20,
                                  verbose=2, callbacks=[save_best14_lstm_vola7],
                                  validation_data=(X_val_vola7_lstm_14, y_val_vola14),
                                  shuffle=True)
        # Persist only the history dict, not the full History object.
        dump(model_history.history, filename)
        print(f"El entrenamiento del modelo {i + 1} se ha completado y el historial ha sido guardado en '{filename}'.")
    # Normalize to a plain dict: `load` returns a dict, `fit` returns a History.
    history_vola7_LSTM14.append(model_history if isinstance(model_history, dict) else model_history.history)
El archivo 'history_vola7_LSTM14_model_0.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola7_LSTM14_model_1.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola7_LSTM14_model_2.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola7_LSTM14_model_3.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola7_LSTM14_model_4.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola7_LSTM14_model_5.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola7_LSTM14_model_6.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola7_LSTM14_model_7.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola7_LSTM14_model_8.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola7_LSTM14_model_9.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola7_LSTM14_model_10.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola7_LSTM14_model_11.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola7_LSTM14_model_12.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola7_LSTM14_model_13.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola7_LSTM14_model_14.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola7_LSTM14_model_15.joblib' ya existe. Se ha cargado el historial del entrenamiento.
En este caso, el parámetro verbose=2 indica que se mostrará una línea por cada época durante el entrenamiento. Los valores posibles son: 0 para no mostrar información, 1 para visualizar una barra de progreso y 2 para ver un registro por época.
Además, las muestras se mezclan de manera aleatoria antes de cada época, gracias a la opción shuffle=True.
Una vez completado el entrenamiento, se realizan predicciones sobre el Precio del Bitcoin utilizando los mejores modelos guardados. Las predicciones generadas, que corresponden al Precio del Bitcoin escalado, son transformadas inversamente para recuperar los valores originales. Asimismo, se evalúa la calidad del ajuste mediante el uso de métricas de medición como SSE, MAPE, MAD, MSD y R2.
import os
import re
from tensorflow.keras.models import load_model
import numpy as np

# Scan the checkpoint directory and load the saved model whose file name
# encodes the lowest validation loss.
model_dir = 'keras_models'
files = os.listdir(model_dir)
pattern = r"PRSA_data_vola7_LSTM14_weights\.(\d+)-([\d\.]+)\.keras"

best_val_loss = float('inf')
best_model_file = None
best14_lstm_vola7 = None

for file in files:
    match = re.match(pattern, file)
    if match:
        # Group 1 (the epoch number) is not needed for the selection; only
        # the validation loss (group 2) drives it. The unused `epoch` local
        # from the original version has been removed.
        val_loss = float(match.group(2))
        if val_loss < best_val_loss:
            best_val_loss = val_loss
            best_model_file = file  # Remember the best checkpoint file

# Load the best model, if any checkpoint matched the pattern.
if best_model_file:
    best_model_path = os.path.join(model_dir, best_model_file)
    print(f"Cargando el mejor modelo: {best_model_file} con val_loss: {best_val_loss}")
    best14_lstm_vola7 = load_model(best_model_path)
    if best14_lstm_vola7 is None:
        print("Error: No se pudo cargar el mejor modelo.")
else:
    print("No se encontraron archivos de modelos que coincidan con el patrón.")
Cargando el mejor modelo: PRSA_data_vola7_LSTM14_weights.14-0.0036.keras con val_loss: 0.0036
A continuación estimamos las predicciones del modelo LSTM para la época en donde la función de pérdida alcanza su valor más bajo.
# Predict with the best checkpointed model on the train/validation/test
# splits, squeezing the (n, 1) network output down to 1-D arrays.
if best14_lstm_vola7 is None:
    print("No se puede realizar predicciones porque el mejor modelo no se ha cargado.")
else:
    train_preds_vola7_LSTM14 = np.squeeze(best14_lstm_vola7.predict(X_train_vola7_lstm_14))
    val_preds_vola7_LSTM14 = np.squeeze(best14_lstm_vola7.predict(X_val_vola7_lstm_14))
    test_preds_vola7_LSTM14 = np.squeeze(best14_lstm_vola7.predict(X_test_vola7_lstm_14))
    print("Predicciones de Entrenamiento:", train_preds_vola7_LSTM14)
    print("Predicciones de validación:", val_preds_vola7_LSTM14)
    print("Predicciones de prueba:", test_preds_vola7_LSTM14)
155/155 [==============================] - 1s 1ms/step
1/1 [==============================] - 0s 9ms/step
1/1 [==============================] - 0s 8ms/step
Predicciones de Entrenamiento: [-0.00274402 -0.00274402 -0.00274402 ... 0.00425673 0.00398743
0.00379878]
Predicciones de validación: [0.00347924 0.00337976 0.00298566 0.0025208 0.00217265 0.00227922
0.00226325 0.0023182 0.00274348 0.00308675 0.00319052 0.00285405
0.00234187 0.0020296 ]
Predicciones de prueba: [0.00218773 0.00248724 0.00311935 0.00375837 0.00466251 0.00545257
0.0056805 0.0056572 0.00546575 0.00549883 0.00661653 0.00749391
0.00847918 0.00949615]
# Plot the last 100 training observations alongside the validation/test data
# and the LSTM predictions for the 14-day horizon.
plot_model(data_train_plot_vola7_14[-100:], data_val_plot_vola7_14, data_test_plot_vola7_14, val_preds_vola7_LSTM14, test_preds_vola7_LSTM14, "Predicciones usando Memoria a Corto y Largo Paso (LSTM)")
A continuación realizamos un análisis sobre los residuales y rendimiento de nuestro modelo con relación a nuestro conjunto de Entrenamiento(rendimiento) y test (rendimiento y residuales).
Conjunto de datos de Entrenamiento
A continuación se realiza el análisis de los residuales para el conjunto de entrenamiento.
# Residual diagnostics on the training split; returns the Ljung-Box and
# Jarque-Bera p-values used below in the metrics table.
ljung_box_pval_LSTM_train14_vola7, jarque_bera_pval_LSTM_train14_vola7 = diagnostic_plots(y_train_vola14, train_preds_vola7_LSTM14)
Ljung-Box LB Statistic: 4077.494627
Ljung-Box p-value: 0.000000
Se rechaza H0: hay autocorrelación en los residuales.
Jarque-Bera p-value: 0.000000
Se rechaza H0: los residuales no siguen una distribución normal.
# Compute fit metrics for the training split and attach the residual-test
# p-values as extra columns of the single-row metrics table.
metrica_vola7_LSTM_train14 = metricas(y_train_vola14, train_preds_vola7_LSTM14)
metrica_vola7_LSTM_train14 = metrica_vola7_LSTM_train14.rename(
    index={0: 'LSTM Entrenamiento Volatilidada ω = 7 y τ = 14'})
metrica_vola7_LSTM_train14['Ljung-Box p-value'] = ljung_box_pval_LSTM_train14_vola7
metrica_vola7_LSTM_train14['Jarque-Bera p-value'] = jarque_bera_pval_LSTM_train14_vola7
metrica_vola7_LSTM_train14
| SSE | MAPE | MAD | MSD | R2 | Ljung-Box p-value | Jarque-Bera p-value | |
|---|---|---|---|---|---|---|---|
| LSTM Entrenamiento Volatilidada ω = 7 y τ = 14 | 22.5767 | 70.72% | 0.03 | 0.0 | -7.96% | 0.0 | 0.0 |
Conjunto de datos de Prueba:
# Residual diagnostics on the test split (Ljung-Box and Jarque-Bera p-values).
ljung_box_pvalLSTM14_vola7, jarque_bera_pvalLSTM14_vola7 = evaluate_residuals(data_test_plot_vola7_14, test_preds_vola7_LSTM14)
Se rechaza H0: hay autocorrelación en los residuales.
No se rechaza H0: los residuales siguen una distribución normal.
# Compute fit metrics for the test split and attach the residual-test
# p-values as extra columns of the single-row metrics table.
metrica_LSTM_test14_vola7 = metricas(y_test_vola14, test_preds_vola7_LSTM14)
metrica_LSTM_test14_vola7 = metrica_LSTM_test14_vola7.rename(
    index={0: 'LSTM Prueba Volatilidada ω = 7 y τ = 14'})
metrica_LSTM_test14_vola7['Ljung-Box p-value'] = ljung_box_pvalLSTM14_vola7
metrica_LSTM_test14_vola7['Jarque-Bera p-value'] = jarque_bera_pvalLSTM14_vola7
metrica_LSTM_test14_vola7
| SSE | MAPE | MAD | MSD | R2 | Ljung-Box p-value | Jarque-Bera p-value | |
|---|---|---|---|---|---|---|---|
| LSTM Prueba Volatilidada ω = 7 y τ = 14 | 0.0181 | 86.89% | 0.04 | 0.0 | -2059.9% | 0.0006 | 0.5676 |
Curva Runs vs Error/Score :
# Validation-loss curves across epochs for the trained model bank.
plot_best_model_validation_loss(history_vola7_LSTM14)
Boxplot Errores Conjunto de Entrenamiento, Validación y Prueba :
# Boxplots of prediction errors for the training, validation and test splits.
errores_plots(y_train_vola14, train_preds_vola7_LSTM14, y_val_vola14, val_preds_vola7_LSTM14, y_test_vola14, test_preds_vola7_LSTM14)
De acuerdo a los resultados obtenidos del gráfico de Run vs Error/Score y al compararlo con el gráfico boxplot de los residuales se observa lo siguiente:
Conjunto de Entrenamiento: La media de los residuales se encuentra alejada considerablemente del error/score correspondiente al val_loss de la época del mejor modelo; además muestra un sesgo considerable a la derecha y una alta variabilidad.
Conjunto de Validación: La media de los residuales se encuentra alejada del error/score correspondiente al val_loss de la época del mejor modelo.
Conjunto de Prueba: La media de los residuales se encuentra cercana al error/score correspondiente al val_loss de la época del mejor modelo.
Lo anterior es coherente con las métricas obtenidas para el modelo implementado.
Horizonte de 21 días (\(\tau=21\))#
Indexación de parámetros para cambio de dimensión de X de 2D a 3D.
# Reshape the 2-D feature matrices into the 3-D (samples, timesteps, features)
# layout required by Keras LSTM layers (shapes printed below).
X_train_vola7_lstm_21, X_val_vola7_lstm_21, X_test_vola7_lstm_21 = change_dimension_lstm(X_train_vola21, X_val_vola21, X_test_vola21)
Shape of 3D arrays X: (4915, 21, 1) (21, 21, 1) (21, 21, 1)
A continuación, se lleva a cabo la búsqueda de los hiperparámetros que optimizan nuestro modelo utilizando Keras y GridSearch.
from sklearn.metrics import make_scorer, mean_absolute_error

os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'  # Silence TF info/warning logs.

# Define the LSTM network architecture
def create_lstm_model(optimizer, activation):
    """Build and compile the two-layer LSTM regressor used by the grid search.

    Parameters
    ----------
    optimizer : str or keras.optimizers.Optimizer
        Optimizer handed to ``compile``.
    activation : str
        Activation for the LSTM layers. BUG FIX: this argument was accepted
        but never used, so the grid search over 'activation' had no effect
        on the model being evaluated.
    """
    input_layer_lstm = Input(shape=(21, 1), dtype='float32')
    lstm_layer1 = LSTM(64, activation=activation, return_sequences=True)(input_layer_lstm)
    lstm_layer2 = LSTM(32, activation=activation, return_sequences=False)(lstm_layer1)
    dropout_layer_lstm = Dropout(0.2)(lstm_layer2)
    # Linear head: this is a regression model.
    output_layer_lstm = Dense(1, activation='linear')(dropout_layer_lstm)
    ts_model_lstm = Model(inputs=input_layer_lstm, outputs=output_layer_lstm)
    ts_model_lstm.compile(loss='mean_absolute_error', optimizer=optimizer)
    return ts_model_lstm

# Hyperparameter grid (full candidate sets kept in the trailing comments).
param_grid = {
    'activation': ['relu'],  # e.g. ['relu', 'tanh', 'sigmoid']
    'epochs': [20],          # e.g. [20, 50, 100, 150]
    'optimizer': ['SGD'],    # e.g. ['SGD', 'RMSprop', 'Adam']
}

# Grid-search configuration.
# BUG FIX: make_scorer(mean_absolute_error) defaults to greater_is_better=True,
# which would make GridSearchCV MAXIMIZE the MAE and pick the worst candidate.
# With a single candidate the printed run was unaffected, but with a real grid
# the selection would be inverted.
scoring = make_scorer(mean_absolute_error, greater_is_better=False)
model = KerasRegressor(build_fn=create_lstm_model, epochs=10, batch_size=16, verbose=0)
grid = GridSearchCV(estimator=model, param_grid=param_grid,
                    cv=KFold(n_splits=5, shuffle=True),
                    scoring=scoring, n_jobs=-1, verbose=2)
grid_result = grid.fit(X_train_vola7_lstm_21, y_train_vola21)

# Resultados del Grid Search
print(f"Mejor función de activación: {grid_result.best_params_['activation']}")
print(f"Mejor número de epocas: {grid_result.best_params_['epochs']}")
print(f"Mejor Longitud de paso μ (optimizer): {grid_result.best_params_['optimizer']}")
print(f"Mejor Puntuación: {grid_result.best_score_}")
Fitting 5 folds for each of 1 candidates, totalling 5 fits
[CV] END ..........activation=relu, epochs=20, optimizer=SGD; total time= 33.7s
[CV] END ..........activation=relu, epochs=20, optimizer=SGD; total time= 34.1s
[CV] END ..........activation=relu, epochs=20, optimizer=SGD; total time= 34.2s
[CV] END ..........activation=relu, epochs=20, optimizer=SGD; total time= 34.2s
[CV] END ..........activation=relu, epochs=20, optimizer=SGD; total time= 34.3s
Mejor función de activación: relu
Mejor número de epocas: 20
Mejor Longitud de paso μ (optimizer): SGD
Mejor Puntuación: 0.019228006469962713
El modelo de predicción de series temporales se basa en una red neuronal recurrente que incluye una capa de entrada que se conecta a una capa LSTM. Esta capa LSTM considera 21 pasos temporales, equivalentes al número de datos históricos. La salida de la LSTM se genera únicamente a partir del último paso temporal. Cada uno de los n pasos temporales definidos en el shape (n,) cuenta con 32 neuronas ocultas.
La cantidad de pasos temporales (timesteps) y 1 indica el número de características (features) por cada paso temporal. Se utiliza return_sequences=True cuando es necesario enviar la secuencia completa de salidas a la capa siguiente, que puede ser otra capa recurrente, para este caso otra capa LSTM.
La salida de LSTM pasa a una capa de exclusión que elimina aleatoriamente el 20%, 40%, 60% y 80% de la entrada antes de pasar a la capa de salida, que tiene una única neurona oculta con una función de activación lineal
La indexación de parámetros de la función build_models_lstm se realiza con base en lo indicado en el numeral 3 del parcial práctico.
# Model-bank hyperparameters for the 21-day horizon: one model is built for
# every (neurons, dropout) combination, all with the SGD optimizer selected
# by the grid search above.
# NOTE(review): the printed summaries below report identical parameter counts
# (29,345) for every neuron setting — verify that build_models_lstm actually
# uses neurons_list when constructing the layers.
input_shape21 = 21
neurons_list = [10, 100, 1000, 10000]
dropout_rates = [0.2, 0.4, 0.6, 0.8]
models_LSTM21_vola7 = build_models_lstm(input_shape21, neurons_list, dropout_rates, 'SGD')
Model: "model_375"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_376 (InputLayer) [(None, 21, 1)] 0
lstm_342 (LSTM) (None, 21, 64) 16896
lstm_343 (LSTM) (None, 32) 12416
dropout_375 (Dropout) (None, 32) 0
dense_987 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10 neuronas y dropout 0.2:
Model: "model_375"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_376 (InputLayer) [(None, 21, 1)] 0
lstm_342 (LSTM) (None, 21, 64) 16896
lstm_343 (LSTM) (None, 32) 12416
dropout_375 (Dropout) (None, 32) 0
dense_987 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_376"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_377 (InputLayer) [(None, 21, 1)] 0
lstm_344 (LSTM) (None, 21, 64) 16896
lstm_345 (LSTM) (None, 32) 12416
dropout_376 (Dropout) (None, 32) 0
dense_988 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10 neuronas y dropout 0.4:
Model: "model_376"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_377 (InputLayer) [(None, 21, 1)] 0
lstm_344 (LSTM) (None, 21, 64) 16896
lstm_345 (LSTM) (None, 32) 12416
dropout_376 (Dropout) (None, 32) 0
dense_988 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_377"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_378 (InputLayer) [(None, 21, 1)] 0
lstm_346 (LSTM) (None, 21, 64) 16896
lstm_347 (LSTM) (None, 32) 12416
dropout_377 (Dropout) (None, 32) 0
dense_989 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10 neuronas y dropout 0.6:
Model: "model_377"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_378 (InputLayer) [(None, 21, 1)] 0
lstm_346 (LSTM) (None, 21, 64) 16896
lstm_347 (LSTM) (None, 32) 12416
dropout_377 (Dropout) (None, 32) 0
dense_989 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_378"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_379 (InputLayer) [(None, 21, 1)] 0
lstm_348 (LSTM) (None, 21, 64) 16896
lstm_349 (LSTM) (None, 32) 12416
dropout_378 (Dropout) (None, 32) 0
dense_990 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10 neuronas y dropout 0.8:
Model: "model_378"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_379 (InputLayer) [(None, 21, 1)] 0
lstm_348 (LSTM) (None, 21, 64) 16896
lstm_349 (LSTM) (None, 32) 12416
dropout_378 (Dropout) (None, 32) 0
dense_990 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_379"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_380 (InputLayer) [(None, 21, 1)] 0
lstm_350 (LSTM) (None, 21, 64) 16896
lstm_351 (LSTM) (None, 32) 12416
dropout_379 (Dropout) (None, 32) 0
dense_991 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 100 neuronas y dropout 0.2:
Model: "model_379"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_380 (InputLayer) [(None, 21, 1)] 0
lstm_350 (LSTM) (None, 21, 64) 16896
lstm_351 (LSTM) (None, 32) 12416
dropout_379 (Dropout) (None, 32) 0
dense_991 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_380"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_381 (InputLayer) [(None, 21, 1)] 0
lstm_352 (LSTM) (None, 21, 64) 16896
lstm_353 (LSTM) (None, 32) 12416
dropout_380 (Dropout) (None, 32) 0
dense_992 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 100 neuronas y dropout 0.4:
Model: "model_380"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_381 (InputLayer) [(None, 21, 1)] 0
lstm_352 (LSTM) (None, 21, 64) 16896
lstm_353 (LSTM) (None, 32) 12416
dropout_380 (Dropout) (None, 32) 0
dense_992 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_381"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_382 (InputLayer) [(None, 21, 1)] 0
lstm_354 (LSTM) (None, 21, 64) 16896
lstm_355 (LSTM) (None, 32) 12416
dropout_381 (Dropout) (None, 32) 0
dense_993 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 100 neuronas y dropout 0.6:
Model: "model_381"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_382 (InputLayer) [(None, 21, 1)] 0
lstm_354 (LSTM) (None, 21, 64) 16896
lstm_355 (LSTM) (None, 32) 12416
dropout_381 (Dropout) (None, 32) 0
dense_993 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_382"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_383 (InputLayer) [(None, 21, 1)] 0
lstm_356 (LSTM) (None, 21, 64) 16896
lstm_357 (LSTM) (None, 32) 12416
dropout_382 (Dropout) (None, 32) 0
dense_994 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 100 neuronas y dropout 0.8:
Model: "model_382"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_383 (InputLayer) [(None, 21, 1)] 0
lstm_356 (LSTM) (None, 21, 64) 16896
lstm_357 (LSTM) (None, 32) 12416
dropout_382 (Dropout) (None, 32) 0
dense_994 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_383"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_384 (InputLayer) [(None, 21, 1)] 0
lstm_358 (LSTM) (None, 21, 64) 16896
lstm_359 (LSTM) (None, 32) 12416
dropout_383 (Dropout) (None, 32) 0
dense_995 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 1000 neuronas y dropout 0.2:
Model: "model_383"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_384 (InputLayer) [(None, 21, 1)] 0
lstm_358 (LSTM) (None, 21, 64) 16896
lstm_359 (LSTM) (None, 32) 12416
dropout_383 (Dropout) (None, 32) 0
dense_995 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_384"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_385 (InputLayer) [(None, 21, 1)] 0
lstm_360 (LSTM) (None, 21, 64) 16896
lstm_361 (LSTM) (None, 32) 12416
dropout_384 (Dropout) (None, 32) 0
dense_996 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 1000 neuronas y dropout 0.4:
Model: "model_384"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_385 (InputLayer) [(None, 21, 1)] 0
lstm_360 (LSTM) (None, 21, 64) 16896
lstm_361 (LSTM) (None, 32) 12416
dropout_384 (Dropout) (None, 32) 0
dense_996 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_385"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_386 (InputLayer) [(None, 21, 1)] 0
lstm_362 (LSTM) (None, 21, 64) 16896
lstm_363 (LSTM) (None, 32) 12416
dropout_385 (Dropout) (None, 32) 0
dense_997 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 1000 neuronas y dropout 0.6:
Model: "model_385"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_386 (InputLayer) [(None, 21, 1)] 0
lstm_362 (LSTM) (None, 21, 64) 16896
lstm_363 (LSTM) (None, 32) 12416
dropout_385 (Dropout) (None, 32) 0
dense_997 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_386"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_387 (InputLayer) [(None, 21, 1)] 0
lstm_364 (LSTM) (None, 21, 64) 16896
lstm_365 (LSTM) (None, 32) 12416
dropout_386 (Dropout) (None, 32) 0
dense_998 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 1000 neuronas y dropout 0.8:
Model: "model_386"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_387 (InputLayer) [(None, 21, 1)] 0
lstm_364 (LSTM) (None, 21, 64) 16896
lstm_365 (LSTM) (None, 32) 12416
dropout_386 (Dropout) (None, 32) 0
dense_998 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_387"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_388 (InputLayer) [(None, 21, 1)] 0
lstm_366 (LSTM) (None, 21, 64) 16896
lstm_367 (LSTM) (None, 32) 12416
dropout_387 (Dropout) (None, 32) 0
dense_999 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10000 neuronas y dropout 0.2:
Model: "model_387"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_388 (InputLayer) [(None, 21, 1)] 0
lstm_366 (LSTM) (None, 21, 64) 16896
lstm_367 (LSTM) (None, 32) 12416
dropout_387 (Dropout) (None, 32) 0
dense_999 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_388"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_389 (InputLayer) [(None, 21, 1)] 0
lstm_368 (LSTM) (None, 21, 64) 16896
lstm_369 (LSTM) (None, 32) 12416
dropout_388 (Dropout) (None, 32) 0
dense_1000 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10000 neuronas y dropout 0.4:
Model: "model_388"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_389 (InputLayer) [(None, 21, 1)] 0
lstm_368 (LSTM) (None, 21, 64) 16896
lstm_369 (LSTM) (None, 32) 12416
dropout_388 (Dropout) (None, 32) 0
dense_1000 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_389"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_390 (InputLayer) [(None, 21, 1)] 0
lstm_370 (LSTM) (None, 21, 64) 16896
lstm_371 (LSTM) (None, 32) 12416
dropout_389 (Dropout) (None, 32) 0
dense_1001 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10000 neuronas y dropout 0.6:
Model: "model_389"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_390 (InputLayer) [(None, 21, 1)] 0
lstm_370 (LSTM) (None, 21, 64) 16896
lstm_371 (LSTM) (None, 32) 12416
dropout_389 (Dropout) (None, 32) 0
dense_1001 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_390"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_391 (InputLayer) [(None, 21, 1)] 0
lstm_372 (LSTM) (None, 21, 64) 16896
lstm_373 (LSTM) (None, 32) 12416
dropout_390 (Dropout) (None, 32) 0
dense_1002 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10000 neuronas y dropout 0.8:
Model: "model_390"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_391 (InputLayer) [(None, 21, 1)] 0
lstm_372 (LSTM) (None, 21, 64) 16896
lstm_373 (LSTM) (None, 32) 12416
dropout_390 (Dropout) (None, 32) 0
dense_1002 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Para entrenar el modelo, se utiliza la función fit() en el objeto del modelo, proporcionándole como argumentos X_train y y_train. El proceso de entrenamiento se lleva a cabo a lo largo de un número específico de épocas. Además, el parámetro batch_size especifica cuántas muestras del conjunto de entrenamiento se usarán en cada iteración de retropropagación (backpropagation).
El conjunto de datos de validación se utiliza para evaluar el rendimiento del modelo al finalizar cada época. Se emplea un objeto ModelCheckpoint que monitorea la función de pérdida en el conjunto de validación y almacena el modelo correspondiente a la época en la que esta función alcanza su valor más bajo.
Se define val_loss como el valor de la función de coste para los datos de validación cruzada y loss como el valor de la función de coste para los datos de entrenamiento. period=1 indica que el modelo se evaluará y potencialmente se guardará después de cada época.
# Checkpoint callback: after each epoch, evaluate val_loss and keep on disk
# only the model from the epoch where it reaches its lowest value so far.
from tensorflow.keras.callbacks import ModelCheckpoint

# Checkpoint filename encodes the epoch number and the validation loss.
save_weights = os.path.join(
    'keras_models',
    'PRSA_data_vola7_LSTM21_weights.{epoch:02d}-{val_loss:.4f}.keras',
)
save_best21_lstm_vola7 = ModelCheckpoint(
    save_weights,
    monitor='val_loss',
    mode='min',
    save_best_only=True,
    save_weights_only=False,  # persist the full model, not just weights
    save_freq='epoch',
    verbose=2,
)
A continuación se itera sobre cada uno de los modelos del objeto models_LSTM21.
import os
from joblib import dump, load

# Train every LSTM model in models_LSTM21_vola7, caching each training
# history on disk so re-running the notebook does not retrain.
history_vola7_LSTM21 = []
for i, model in enumerate(models_LSTM21_vola7):
    filename = f'history_vola7_LSTM21_model_{i}.joblib'
    if os.path.exists(filename):
        # A previous run already trained this model: load the cached history.
        model_history = load(filename)
        # BUGFIX: the message printed the literal text '(unknown)' instead of
        # interpolating the actual cache filename.
        print(f"El archivo '{filename}' ya existe. Se ha cargado el historial del entrenamiento.")
    else:
        model_history = model.fit(x=X_train_vola7_lstm_21, y=y_train_vola21, batch_size=16, epochs=20,
                                  verbose=2, callbacks=[save_best21_lstm_vola7],
                                  validation_data=(X_val_vola7_lstm_21, y_val_vola21),
                                  shuffle=True)
        # Persist only the metrics dict — Keras History objects do not
        # serialize cleanly.
        dump(model_history.history, filename)
        print(f"El entrenamiento del modelo {i + 1} se ha completado y el historial ha sido guardado en '{filename}'.")
    # Cached entries are already plain dicts; fresh runs return a History
    # object whose .history dict we store instead.
    history_vola7_LSTM21.append(model_history if isinstance(model_history, dict) else model_history.history)
El archivo 'history_vola7_LSTM21_model_0.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola7_LSTM21_model_1.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola7_LSTM21_model_2.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola7_LSTM21_model_3.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola7_LSTM21_model_4.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola7_LSTM21_model_5.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola7_LSTM21_model_6.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola7_LSTM21_model_7.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola7_LSTM21_model_8.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola7_LSTM21_model_9.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola7_LSTM21_model_10.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola7_LSTM21_model_11.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola7_LSTM21_model_12.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola7_LSTM21_model_13.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola7_LSTM21_model_14.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola7_LSTM21_model_15.joblib' ya existe. Se ha cargado el historial del entrenamiento.
En este caso, el parámetro verbose=2 indica que se mostrará una línea por cada época durante el entrenamiento. Los valores posibles son: 0 para no mostrar información, 1 para visualizar una barra de progreso y 2 para ver un registro por época.
Además, las muestras se mezclan de manera aleatoria antes de cada época, gracias a la opción shuffle=True.
Una vez completado el entrenamiento, se realizan predicciones sobre el Precio del Bitcoin utilizando los mejores modelos guardados. Las predicciones generadas, que corresponden al Precio del Bitcoin escalado, son transformadas inversamente para recuperar los valores originales. Asimismo, se evalúa la calidad del ajuste mediante el uso de métricas de medición como SSE, MAPE, MAD, MSD y R2.
import os
import re
from tensorflow.keras.models import load_model
import numpy as np

# Scan the checkpoint directory and load the saved model whose filename
# encodes the lowest validation loss.
model_dir = 'keras_models'
files = os.listdir(model_dir)
# Checkpoint names look like:
#   PRSA_data_vola7_LSTM21_weights.<epoch>-<val_loss>.keras
pattern = r"PRSA_data_vola7_LSTM21_weights\.(\d+)-([\d\.]+)\.keras"

best_val_loss = float('inf')
best_model_file = None
best21_lstm_vola7 = None

for file in files:
    match = re.match(pattern, file)
    if match:
        # group(1) is the epoch number (not needed for the selection);
        # group(2) is the validation loss recorded in the filename.
        val_loss = float(match.group(2))
        if val_loss < best_val_loss:
            best_val_loss = val_loss
            best_model_file = file

# Load the best model, if any checkpoint matched the pattern.
if best_model_file:
    best_model_path = os.path.join(model_dir, best_model_file)
    print(f"Cargando el mejor modelo: {best_model_file} con val_loss: {best_val_loss}")
    best21_lstm_vola7 = load_model(best_model_path)
    if best21_lstm_vola7 is None:
        print("Error: No se pudo cargar el mejor modelo.")
else:
    print("No se encontraron archivos de modelos que coincidan con el patrón.")
Cargando el mejor modelo: PRSA_data_vola7_LSTM21_weights.18-0.0029.keras con val_loss: 0.0029
A continuación estimamos las predicciones del modelo LSTM para la época en donde la función de pérdida alcanza su valor más bajo.
# Show the raw checkpoint-directory listing that the selection loop above scanned.
print("Archivos en el directorio 'keras_models':", files)
Archivos en el directorio 'keras_models': ['PRSA_data_vola14_MLP7_weights.04-0.0018.keras', 'PRSA_data_price_MLP21_weights.01-3579.4128.keras', 'PRSA_data_price_LSTM21_weights.01-43062.3047.keras', 'PRSA_data_price_LSTM28_weights.02-42892.2305.keras', 'PRSA_data_price_LSTM_weights.19-67016.3047.keras', 'PRSA_data_at_LSTM28_weights.19-0.0651.keras', 'PRSA_data_vola14_MLP21_weights.20-0.0011.keras', 'PRSA_data_price_LSTM14_weights.06-51394.8750.keras', 'PRSA_data_vola21_LSTM7_weights.04-0.0012.keras', 'PRSA_data_vola7_MLP28_weights.07-0.0033.keras', 'PRSA_data_price_LSTM21_weights.02-43052.0469.keras', 'PRSA_data_at_MLP7_weights.80-0.0780.keras', 'PRSA_data_vola21_LSTM28_weights.02-0.0022.keras', 'PRSA_data_vola21_LSTM14_weights.04-0.0011.keras', 'PRSA_data_vola7_LSTM21_weights.18-0.0029.keras', 'PRSA_data_vola21_LSTM7_weights.08-0.0013.keras', 'PRSA_data_vola28_MLP14_weights.04-0.0013.keras', 'PRSA_data_price_MLP14_weights.52-626.2623.keras', 'PRSA_data_vola7_MLP7_weights.15-0.0041.keras', 'PRSA_data_price_LSTM14_weights.13-51325.8516.keras', 'PRSA_data_vola14_MLP7_weights.02-0.0023.keras', 'PRSA_data_price_LSTM21_weights.11-42962.6562.keras', 'PRSA_data_vola7_LSTM28_weights.10-0.0037.keras', 'PRSA_data_at_LSTM_weights.04-1.2071.keras', 'PRSA_data_price_MLP7_weights.50-1342.5273.keras', 'PRSA_data_price_MLP14_weights.03-5108.7837.keras', 'PRSA_data_vola28_MLP7_weights.14-0.0012.keras', 'PRSA_data_at_MLP7_weights.11-2.4883.keras', 'PRSA_data_price_LSTM28_weights.08-42832.5430.keras', 'PRSA_data_price_MLP28_weights.01-9242.6553.keras', 'PRSA_data_vola21_LSTM28_weights.04-0.0019.keras', 'PRSA_data_price_MLP21_weights.16-1013.2251.keras', 'PRSA_data_price_MLP28_weights.13-1042.5178.keras', 'PRSA_data_vola28_MLP28_weights.34-0.0006.keras', 'PRSA_data_at_MLP7_weights.01-19.6635.keras', 'PRSA_data_at_MLP7_weights.19-1.8379.keras', 'PRSA_data_at_MLP7_weights.12-2.3991.keras', 'PRSA_data_price_MLP7_weights.15-1346.3275.keras', 
'PRSA_data_vola14_MLP28_weights.02-0.0024.keras', 'PRSA_data_at_LSTM21_weights.05-0.7513.keras', 'PRSA_data_vola14_MLP28_weights.07-0.0017.keras', 'PRSA_data_at_MLP28_weights.05-0.0930.keras', 'PRSA_data_at_LSTM28_weights.02-2.0782.keras', 'PRSA_data_price_LSTM14_weights.20-51256.7266.keras', 'PRSA_data_vola21_LSTM21_weights.05-0.0013.keras', 'PRSA_data_price_MLP14_weights.12-1304.7009.keras', 'PRSA_data_vola7_LSTM14_weights.09-0.0044.keras', 'PRSA_data_vola28_MLP28_weights.50-0.0006.keras', 'PRSA_data_vola7_MLP14_weights.15-0.0020.keras', 'PRSA_data_price_MLP14_weights.08-1386.5365.keras', 'PRSA_data_price_LSTM21_weights.09-42982.2383.keras', 'PRSA_data_price_MLP28_weights.11-7366.8994.keras', 'PRSA_data_vola14_MLP7_weights.18-0.0015.keras', 'PRSA_data_price_MLP7_weights.14-1734.7238.keras', 'PRSA_data_at_LSTM28_weights.03-1.3148.keras', 'PRSA_data_price_LSTM14_weights.15-51306.2070.keras', 'PRSA_data_vola21_MLP28_weights.25-0.0010.keras', 'PRSA_data_at_LSTM28_weights.10-0.2820.keras', 'PRSA_data_at_LSTM14_weights.02-2.2175.keras', 'PRSA_data_vola28_LSTM7_weights.08-0.0014.keras', 'PRSA_data_price_MLP28_weights.10-1107.4047.keras', 'PRSA_data_vola28_MLP7_weights.01-0.0068.keras', 'PRSA_data_vola21_LSTM21_weights.03-0.0015.keras', 'PRSA_data_price_MLP7_weights.01-9253.3047.keras', 'PRSA_data_vola21_MLP14_weights.31-0.0007.keras', 'PRSA_data_price_LSTM28_weights.12-42793.6172.keras', 'PRSA_data_price_LSTM14_weights.09-51365.0000.keras', 'PRSA_data_vola28_MLP28_weights.42-0.0006.keras', 'PRSA_data_price_MLP7_weights.05-4792.5054.keras', 'PRSA_data_vola21_LSTM_weights.01-0.0024.keras', 'PRSA_data_price_LSTM28_weights.17-42745.0352.keras', 'PRSA_data_vola28_MLP28_weights.45-0.0006.keras', 'PRSA_data_vola28_MLP14_weights.08-0.0010.keras', 'PRSA_data_at_LSTM_weights.11-0.6498.keras', 'PRSA_data_vola28_MLP28_weights.39-0.0007.keras', 'PRSA_data_at_MLP21_weights.11-0.0326.keras', 'PRSA_data_price_LSTM_weights.20-67006.4141.keras', 
'PRSA_data_vola28_MLP7_weights.06-0.0014.keras', 'PRSA_data_at_LSTM14_weights.06-0.8145.keras', 'PRSA_data_at_MLP28_weights.01-2.1902.keras', 'PRSA_data_price_MLP14_weights.46-814.1992.keras', '.DS_Store', 'PRSA_data_at_LSTM21_weights.01-5.9452.keras', 'PRSA_data_vola28_LSTM28_weights.12-0.0006.keras', 'PRSA_data_price_LSTM14_weights.18-51276.5625.keras', 'PRSA_data_vola21_LSTM14_weights.01-0.0078.keras', 'PRSA_data_vola14_MLP21_weights.04-0.0015.keras', 'PRSA_data_vola21_LSTM7_weights.02-0.0012.keras', 'PRSA_data_vola21_LSTM14_weights.02-0.0011.keras', 'PRSA_data_at_MLP7_weights.09-2.9793.keras', 'PRSA_data_vola21_MLP14_weights.06-0.0007.keras', 'PRSA_data_vola14_MLP21_weights.78-0.0010.keras', 'PRSA_data_price_MLP14_weights.64-711.9107.keras', 'PRSA_data_vola14_MLP28_weights.31-0.0011.keras', 'PRSA_data_vola14_LSTM_weights.08-0.0020.keras', 'PRSA_data_vola14_LSTM21_weights.02-0.0018.keras', 'PRSA_data_at_LSTM21_weights.02-2.0083.keras', 'PRSA_data_vola14_LSTM28_weights.17-0.0014.keras', 'PRSA_data_vola14_MLP14_weights.29-0.0005.keras', 'PRSA_data_price_LSTM_weights.03-67175.7734.keras', 'PRSA_data_vola14_LSTM_weights.01-0.0068.keras', 'PRSA_data_price_LSTM28_weights.11-42803.3086.keras', 'PRSA_data_price_MLP28_weights.15-7107.3306.keras', 'PRSA_data_vola14_MLP21_weights.49-0.0010.keras', 'PRSA_data_vola28_MLP7_weights.07-0.0010.keras', 'PRSA_data_vola21_LSTM7_weights.02-0.0010.keras', 'PRSA_data_at_LSTM14_weights.15-0.4082.keras', 'PRSA_data_vola28_MLP7_weights.19-0.0011.keras', 'PRSA_data_at_LSTM21_weights.19-0.3670.keras', 'PRSA_data_vola14_MLP14_weights.06-0.0006.keras', 'PRSA_data_vola14_LSTM21_weights.06-0.0012.keras', 'PRSA_data_price_LSTM_weights.17-67036.2891.keras', 'PRSA_data_vola28_MLP7_weights.03-0.0018.keras', 'PRSA_data_vola7_MLP28_weights.02-0.0039.keras', 'PRSA_data_vola7_LSTM28_weights.16-0.0035.keras', 'PRSA_data_vola21_MLP7_weights.20-0.0011.keras', 'PRSA_data_vola21_LSTM7_weights.01-0.0018.keras', 
'PRSA_data_vola21_MLP21_weights.33-0.0009.keras', 'PRSA_data_price_MLP21_weights.64-870.7020.keras', 'PRSA_data_vola21_MLP14_weights.22-0.0007.keras', 'PRSA_data_vola7_MLP14_weights.14-0.0020.keras', 'PRSA_data_price_LSTM21_weights.17-42903.9648.keras', 'PRSA_data_vola21_MLP14_weights.01-0.0021.keras', 'PRSA_data_vola21_LSTM7_weights.16-0.0011.keras', 'PRSA_data_at_LSTM21_weights.17-0.5070.keras', 'PRSA_data_at_LSTM14_weights.11-0.6656.keras', 'PRSA_data_vola28_MLP7_weights.02-0.0019.keras', 'PRSA_data_vola14_MLP28_weights.05-0.0018.keras', 'PRSA_data_vola14_LSTM14_weights.08-0.0005.keras', 'PRSA_data_at_MLP21_weights.02-0.8171.keras', 'PRSA_data_price_MLP28_weights.02-8134.2202.keras', 'PRSA_data_price_LSTM28_weights.06-42852.3320.keras', 'PRSA_data_vola7_MLP14_weights.02-0.0036.keras', 'PRSA_data_vola14_LSTM28_weights.01-0.0026.keras', 'PRSA_data_at_MLP7_weights.04-9.8837.keras', 'PRSA_data_price_MLP28_weights.16-1274.4368.keras', 'PRSA_data_price_LSTM21_weights.07-43002.0078.keras', 'PRSA_data_vola7_MLP28_weights.23-0.0032.keras', 'PRSA_data_price_LSTM_weights.01-67196.3359.keras', 'PRSA_data_at_MLP28_weights.02-1.9156.keras', 'PRSA_data_vola14_MLP21_weights.02-0.0016.keras', 'PRSA_data_vola28_MLP14_weights.14-0.0007.keras', 'PRSA_data_price_LSTM28_weights.07-42842.4375.keras', 'PRSA_data_price_MLP14_weights.37-825.1805.keras', 'PRSA_data_vola7_MLP14_weights.07-0.0021.keras', 'PRSA_data_price_LSTM14_weights.04-51414.8945.keras', 'PRSA_data_price_MLP7_weights.42-1446.1339.keras', 'PRSA_data_price_MLP21_weights.57-932.6497.keras', 'PRSA_data_price_LSTM14_weights.19-51266.6133.keras', 'PRSA_data_vola21_MLP14_weights.34-0.0008.keras', 'PRSA_data_price_MLP14_weights.01-10485.8262.keras', 'PRSA_data_vola21_LSTM7_weights.08-0.0012.keras', 'PRSA_data_at_LSTM_weights.02-2.4966.keras', 'PRSA_data_at_LSTM28_weights.08-0.3981.keras', 'PRSA_data_vola21_LSTM28_weights.15-0.0009.keras', 'PRSA_data_price_LSTM_weights.20-67006.3047.keras', 
'PRSA_data_price_LSTM_weights.14-67066.1016.keras', 'PRSA_data_price_LSTM21_weights.19-42884.2422.keras', 'PRSA_data_vola28_LSTM21_weights.18-0.0008.keras', 'PRSA_data_vola21_LSTM7_weights.04-0.0013.keras', 'PRSA_data_price_LSTM28_weights.20-42714.5508.keras', 'PRSA_data_at_LSTM14_weights.13-0.5609.keras', 'PRSA_data_vola28_LSTM28_weights.04-0.0009.keras', 'PRSA_data_vola14_MLP21_weights.01-0.0118.keras', 'PRSA_data_vola7_MLP21_weights.02-0.0032.keras', 'PRSA_data_at_MLP7_weights.74-0.2838.keras', 'PRSA_data_price_MLP28_weights.17-1217.0461.keras', 'PRSA_data_at_MLP21_weights.16-1.1973.keras', 'PRSA_data_vola7_MLP7_weights.02-0.0036.keras', 'PRSA_data_vola21_LSTM7_weights.02-0.0015.keras', 'PRSA_data_vola14_LSTM14_weights.04-0.0006.keras', 'PRSA_data_vola28_LSTM28_weights.18-0.0006.keras', 'PRSA_data_price_MLP21_weights.74-805.9489.keras', 'PRSA_data_vola21_MLP14_weights.02-0.0011.keras', 'PRSA_data_vola28_MLP21_weights.02-0.0012.keras', 'PRSA_data_vola21_LSTM_weights.11-0.0014.keras', 'PRSA_data_vola7_LSTM14_weights.14-0.0036.keras', 'PRSA_data_at_MLP28_weights.03-1.7712.keras', 'PRSA_data_price_LSTM21_weights.20-42874.3555.keras', 'PRSA_data_vola28_LSTM7_weights.20-0.0010.keras', 'PRSA_data_vola28_MLP21_weights.04-0.0009.keras', 'PRSA_data_at_MLP7_weights.08-3.6716.keras', 'PRSA_data_at_MLP21_weights.04-0.0734.keras', 'PRSA_data_vola21_LSTM28_weights.03-0.0021.keras', 'PRSA_data_vola21_MLP14_weights.35-0.0008.keras', 'PRSA_data_vola28_LSTM7_weights.11-0.0012.keras', 'PRSA_data_price_LSTM28_weights.04-42872.1953.keras', 'PRSA_data_at_LSTM_weights.08-0.4803.keras', 'PRSA_data_vola28_MLP14_weights.05-0.0010.keras', 'PRSA_data_price_LSTM14_weights.14-51316.0625.keras', 'PRSA_data_vola7_MLP14_weights.01-0.0101.keras', 'PRSA_data_vola14_LSTM28_weights.17-0.0015.keras', 'PRSA_data_vola7_LSTM_weights.01-0.0040.keras', 'PRSA_data_vola14_MLP21_weights.09-0.0011.keras', 'PRSA_data_price_LSTM21_weights.05-43021.9570.keras', 'PRSA_data_vola28_MLP28_weights.03-0.0007.keras', 
'PRSA_data_vola7_LSTM14_weights.10-0.0042.keras', 'PRSA_data_price_LSTM_weights.07-67135.3359.keras', 'PRSA_data_vola7_MLP14_weights.18-0.0020.keras', 'PRSA_data_price_LSTM21_weights.18-42894.1055.keras', 'PRSA_data_vola7_MLP7_weights.01-0.0042.keras', 'PRSA_data_vola14_LSTM_weights.05-0.0024.keras', 'PRSA_data_vola21_MLP14_weights.27-0.0008.keras', 'PRSA_data_vola28_MLP28_weights.02-0.0018.keras', 'PRSA_data_vola21_LSTM7_weights.03-0.0017.keras', 'PRSA_data_price_LSTM_weights.10-67105.4297.keras', 'PRSA_data_vola28_MLP7_weights.19-0.0010.keras', 'PRSA_data_vola7_MLP21_weights.03-0.0030.keras', 'PRSA_data_at_MLP7_weights.06-5.3724.keras', 'PRSA_data_at_LSTM_weights.05-1.1226.keras', 'PRSA_data_vola21_LSTM7_weights.01-0.0026.keras', 'PRSA_data_vola21_LSTM28_weights.05-0.0018.keras', 'PRSA_data_vola14_MLP21_weights.05-0.0012.keras', 'PRSA_data_vola21_LSTM28_weights.14-0.0010.keras', 'PRSA_data_vola21_MLP14_weights.08-0.0010.keras', 'PRSA_data_vola21_MLP28_weights.47-0.0008.keras', 'PRSA_data_price_LSTM14_weights.16-51296.3633.keras', 'PRSA_data_at_MLP7_weights.02-16.3421.keras', 'PRSA_data_vola7_LSTM21_weights.15-0.0030.keras', 'PRSA_data_vola21_MLP21_weights.09-0.0009.keras', 'PRSA_data_at_LSTM14_weights.01-6.2168.keras', 'PRSA_data_vola28_LSTM21_weights.01-0.0016.keras', 'PRSA_data_vola21_LSTM21_weights.16-0.0009.keras', 'PRSA_data_vola21_LSTM28_weights.07-0.0016.keras', 'PRSA_data_vola21_MLP28_weights.19-0.0010.keras', 'PRSA_data_vola21_LSTM28_weights.14-0.0009.keras', 'PRSA_data_vola14_MLP7_weights.09-0.0018.keras', 'PRSA_data_vola21_MLP21_weights.02-0.0011.keras', 'PRSA_data_vola28_LSTM14_weights.03-0.0007.keras', 'PRSA_data_vola21_LSTM_weights.09-0.0015.keras', 'PRSA_data_vola21_LSTM7_weights.09-0.0012.keras', 'PRSA_data_vola28_MLP21_weights.65-0.0007.keras', 'PRSA_data_vola21_MLP28_weights.15-0.0011.keras', 'PRSA_data_at_LSTM28_weights.04-0.7851.keras', 'PRSA_data_at_MLP7_weights.42-0.8694.keras', 'PRSA_data_vola21_LSTM28_weights.18-0.0008.keras', 
'PRSA_data_vola21_MLP14_weights.04-0.0008.keras', 'PRSA_data_vola7_LSTM14_weights.02-0.0040.keras', 'PRSA_data_vola21_LSTM14_weights.12-0.0007.keras', 'PRSA_data_vola7_LSTM21_weights.15-0.0029.keras', 'PRSA_data_vola21_MLP28_weights.01-0.0026.keras', 'PRSA_data_price_LSTM_weights.13-67075.9453.keras', 'PRSA_data_vola14_MLP7_weights.01-0.0041.keras', 'PRSA_data_price_MLP21_weights.02-1476.1034.keras', 'PRSA_data_vola14_LSTM28_weights.19-0.0014.keras', 'PRSA_data_price_MLP14_weights.96-663.0834.keras', 'PRSA_data_vola7_MLP21_weights.94-0.0024.keras', 'PRSA_data_price_LSTM21_weights.03-43041.9570.keras', 'PRSA_data_price_MLP7_weights.12-3798.9531.keras', 'PRSA_data_price_LSTM28_weights.10-42813.0352.keras', 'PRSA_data_vola14_MLP28_weights.20-0.0014.keras', 'PRSA_data_vola7_MLP21_weights.04-0.0027.keras', 'PRSA_data_vola7_LSTM28_weights.18-0.0035.keras', 'PRSA_data_at_MLP7_weights.10-2.7632.keras', 'PRSA_data_at_LSTM21_weights.18-0.3561.keras', 'PRSA_data_price_LSTM_weights.06-67145.4141.keras', 'PRSA_data_price_LSTM28_weights.13-42783.9805.keras', 'PRSA_data_vola21_MLP7_weights.13-0.0012.keras', 'PRSA_data_price_LSTM28_weights.01-42902.4570.keras', 'PRSA_data_price_LSTM_weights.20-67005.9609.keras', 'PRSA_data_vola7_LSTM21_weights.04-0.0034.keras', 'PRSA_data_at_MLP14_weights.03-0.0461.keras', 'PRSA_data_at_LSTM21_weights.07-0.5113.keras', 'PRSA_data_vola28_MLP28_weights.01-0.0042.keras', 'PRSA_data_vola7_LSTM14_weights.01-0.0455.keras', 'PRSA_data_price_MLP28_weights.17-1182.9105.keras', 'PRSA_data_vola7_MLP14_weights.05-0.0018.keras', 'PRSA_data_vola28_MLP14_weights.38-0.0006.keras', 'PRSA_data_vola28_MLP7_weights.10-0.0013.keras', 'PRSA_data_vola7_LSTM21_weights.01-0.0038.keras', 'PRSA_data_price_LSTM14_weights.17-51286.4570.keras', 'PRSA_data_price_LSTM28_weights.16-42754.8008.keras', 'PRSA_data_price_LSTM14_weights.11-51345.4023.keras', 'PRSA_data_price_MLP14_weights.22-638.4409.keras', 'PRSA_data_vola7_MLP28_weights.32-0.0029.keras', 
'PRSA_data_price_MLP7_weights.48-1648.1864.keras', 'PRSA_data_vola28_LSTM28_weights.10-0.0007.keras', 'PRSA_data_vola21_LSTM_weights.03-0.0021.keras', 'PRSA_data_vola21_LSTM21_weights.13-0.0010.keras', 'PRSA_data_at_MLP7_weights.07-4.2627.keras', 'PRSA_data_vola7_LSTM21_weights.17-0.0029.keras', 'PRSA_data_vola21_LSTM28_weights.08-0.0011.keras', 'PRSA_data_price_MLP21_weights.05-1072.5081.keras', 'PRSA_data_at_LSTM21_weights.06-0.6219.keras', 'PRSA_data_vola7_LSTM21_weights.05-0.0030.keras', 'PRSA_data_price_MLP14_weights.29-856.8320.keras', 'PRSA_data_vola14_LSTM21_weights.11-0.0011.keras', 'PRSA_data_price_LSTM21_weights.10-42972.4219.keras', 'PRSA_data_at_LSTM21_weights.19-0.2744.keras', 'PRSA_data_price_LSTM_weights.02-67185.9453.keras', 'PRSA_data_vola28_MLP14_weights.21-0.0007.keras', 'PRSA_data_vola21_LSTM28_weights.16-0.0009.keras', 'PRSA_data_vola28_LSTM21_weights.10-0.0009.keras', 'PRSA_data_price_LSTM28_weights.15-42764.5898.keras', 'PRSA_data_at_LSTM14_weights.07-0.7059.keras', 'PRSA_data_vola21_LSTM21_weights.01-0.0048.keras', 'PRSA_data_at_LSTM14_weights.03-1.3422.keras', 'PRSA_data_vola14_LSTM14_weights.02-0.0015.keras', 'PRSA_data_vola14_MLP14_weights.02-0.0007.keras', 'PRSA_data_vola7_LSTM21_weights.02-0.0034.keras', 'PRSA_data_vola7_MLP21_weights.21-0.0025.keras', 'PRSA_data_at_MLP7_weights.18-1.8396.keras', 'PRSA_data_price_LSTM_weights.11-67095.6172.keras', 'PRSA_data_at_LSTM14_weights.12-0.6378.keras', 'PRSA_data_vola21_LSTM21_weights.07-0.0012.keras', 'PRSA_data_vola7_MLP28_weights.45-0.0030.keras', 'PRSA_data_at_MLP7_weights.26-0.8897.keras', 'PRSA_data_vola21_MLP7_weights.01-0.0014.keras', 'PRSA_data_price_MLP14_weights.19-982.5739.keras', 'PRSA_data_vola14_LSTM_weights.16-0.0017.keras', 'PRSA_data_price_MLP21_weights.94-813.8954.keras', 'PRSA_data_vola7_LSTM21_weights.20-0.0029.keras', 'PRSA_data_at_LSTM_weights.03-1.4842.keras', 'PRSA_data_vola21_MLP28_weights.02-0.0015.keras', 'PRSA_data_price_LSTM21_weights.12-42952.9453.keras', 
'PRSA_data_price_MLP28_weights.17-1019.2816.keras', 'PRSA_data_vola14_LSTM_weights.13-0.0019.keras', 'PRSA_data_vola7_LSTM_weights.02-0.0038.keras', 'PRSA_data_vola7_LSTM28_weights.02-0.0042.keras', 'PRSA_data_at_LSTM28_weights.12-0.0705.keras', 'PRSA_data_at_LSTM14_weights.13-0.2270.keras', 'PRSA_data_price_LSTM_weights.18-67026.2891.keras', 'PRSA_data_vola28_MLP21_weights.66-0.0007.keras', 'PRSA_data_at_MLP14_weights.42-0.0373.keras', 'PRSA_data_vola7_MLP14_weights.38-0.0020.keras', 'PRSA_data_vola7_LSTM21_weights.11-0.0029.keras', 'PRSA_data_vola21_LSTM21_weights.12-0.0010.keras', 'PRSA_data_at_LSTM_weights.14-0.5158.keras', 'PRSA_data_vola14_LSTM21_weights.01-0.0019.keras', 'PRSA_data_vola28_LSTM21_weights.11-0.0009.keras', 'PRSA_data_vola7_LSTM28_weights.05-0.0038.keras', 'PRSA_data_vola14_MLP14_weights.10-0.0005.keras', 'PRSA_data_at_MLP21_weights.03-2.1540.keras', 'PRSA_data_at_LSTM_weights.16-0.5377.keras', 'PRSA_data_vola21_LSTM7_weights.13-0.0013.keras', 'PRSA_data_vola21_MLP28_weights.44-0.0008.keras', 'PRSA_data_at_MLP7_weights.16-1.9941.keras', 'PRSA_data_vola28_LSTM28_weights.01-0.0010.keras', 'PRSA_data_vola28_LSTM14_weights.19-0.0006.keras', 'PRSA_data_price_LSTM21_weights.14-42933.4180.keras', 'PRSA_data_vola14_LSTM28_weights.04-0.0019.keras', 'PRSA_data_price_MLP21_weights.82-828.7976.keras', 'PRSA_data_vola28_MLP21_weights.01-0.0030.keras', 'PRSA_data_price_LSTM14_weights.10-51355.2070.keras', 'PRSA_data_vola21_MLP21_weights.18-0.0010.keras', 'PRSA_data_vola28_MLP21_weights.73-0.0007.keras', 'PRSA_data_vola28_MLP7_weights.20-0.0011.keras', 'PRSA_data_price_LSTM21_weights.13-42943.1719.keras', 'PRSA_data_vola28_MLP28_weights.36-0.0007.keras', 'PRSA_data_vola7_MLP14_weights.04-0.0025.keras', 'PRSA_data_at_MLP14_weights.02-0.2715.keras', 'PRSA_data_price_LSTM14_weights.05-51404.8945.keras', 'PRSA_data_price_LSTM28_weights.19-42725.4297.keras', 'PRSA_data_price_MLP28_weights.01-1561.6656.keras', 'PRSA_data_vola21_MLP7_weights.14-0.0011.keras', 
'PRSA_data_vola7_MLP7_weights.23-0.0033.keras', 'PRSA_data_vola14_MLP28_weights.01-0.0026.keras', 'PRSA_data_at_MLP14_weights.29-0.0323.keras', 'PRSA_data_vola7_MLP21_weights.01-0.0033.keras', 'PRSA_data_vola21_LSTM_weights.19-0.0014.keras', 'PRSA_data_price_MLP7_weights.03-5953.8076.keras', 'PRSA_data_price_MLP21_weights.42-948.6628.keras', 'PRSA_data_vola21_LSTM14_weights.07-0.0008.keras', 'PRSA_data_vola14_LSTM28_weights.05-0.0018.keras', 'PRSA_data_vola7_LSTM28_weights.13-0.0037.keras', 'PRSA_data_vola21_MLP21_weights.01-0.0016.keras', 'PRSA_data_vola14_LSTM21_weights.08-0.0011.keras', 'PRSA_data_price_LSTM21_weights.06-43011.9570.keras', 'PRSA_data_vola7_MLP14_weights.05-0.0019.keras', 'PRSA_data_vola7_MLP28_weights.16-0.0033.keras', 'PRSA_data_vola21_LSTM28_weights.16-0.0008.keras', 'PRSA_data_vola28_MLP14_weights.01-0.0031.keras', 'PRSA_data_vola14_MLP28_weights.11-0.0015.keras', 'PRSA_data_price_LSTM14_weights.03-51425.0000.keras', 'PRSA_data_vola7_LSTM28_weights.04-0.0039.keras', 'PRSA_data_vola28_LSTM7_weights.13-0.0011.keras', 'PRSA_data_price_LSTM14_weights.01-51445.4883.keras', 'PRSA_data_vola28_MLP28_weights.22-0.0006.keras', 'PRSA_data_price_LSTM_weights.09-67115.3359.keras', 'PRSA_data_vola28_LSTM7_weights.14-0.0011.keras', 'PRSA_data_vola7_LSTM14_weights.04-0.0050.keras', 'PRSA_data_vola21_MLP28_weights.17-0.0010.keras', 'PRSA_data_vola28_LSTM21_weights.09-0.0010.keras', 'PRSA_data_vola21_MLP14_weights.13-0.0010.keras', 'PRSA_data_vola14_MLP14_weights.01-0.0013.keras', 'PRSA_data_vola21_MLP28_weights.50-0.0009.keras', 'PRSA_data_at_LSTM_weights.19-0.2184.keras', 'PRSA_data_vola14_LSTM_weights.02-0.0030.keras', 'PRSA_data_vola7_MLP28_weights.39-0.0030.keras', 'PRSA_data_price_LSTM21_weights.04-43031.9414.keras', 'PRSA_data_price_LSTM_weights.15-67056.2109.keras', 'PRSA_data_at_MLP7_weights.20-1.6814.keras', 'PRSA_data_vola28_LSTM7_weights.18-0.0010.keras', 'PRSA_data_at_MLP7_weights.95-0.2130.keras', 
'PRSA_data_vola14_LSTM28_weights.03-0.0021.keras', 'PRSA_data_at_LSTM_weights.01-7.5653.keras', 'PRSA_data_at_MLP7_weights.05-7.2110.keras', 'PRSA_data_vola21_LSTM28_weights.20-0.0008.keras', 'PRSA_data_at_LSTM_weights.07-0.8569.keras', 'PRSA_data_vola14_LSTM28_weights.12-0.0014.keras', 'PRSA_data_price_LSTM21_weights.16-42913.7969.keras', 'PRSA_data_at_MLP28_weights.04-1.3772.keras', 'PRSA_data_price_LSTM_weights.08-67125.3047.keras', 'PRSA_data_price_LSTM14_weights.07-51384.8633.keras', 'PRSA_data_price_LSTM_weights.04-67165.6016.keras', 'PRSA_data_vola14_LSTM28_weights.15-0.0014.keras', 'PRSA_data_at_MLP14_weights.01-0.4195.keras', 'PRSA_data_price_LSTM28_weights.09-42822.7578.keras', 'PRSA_data_price_LSTM28_weights.03-42882.1953.keras', 'PRSA_data_at_LSTM14_weights.05-0.8408.keras', 'PRSA_data_vola7_MLP28_weights.08-0.0030.keras', 'PRSA_data_price_MLP14_weights.07-3004.8474.keras', 'PRSA_data_price_LSTM21_weights.15-42923.6367.keras', 'PRSA_data_vola7_MLP21_weights.81-0.0025.keras', 'PRSA_data_price_MLP21_weights.86-819.7106.keras', 'PRSA_data_vola28_MLP7_weights.10-0.0010.keras', 'PRSA_data_vola21_MLP28_weights.05-0.0012.keras', 'PRSA_data_vola21_MLP14_weights.20-0.0007.keras', 'PRSA_data_vola14_MLP28_weights.42-0.0011.keras', 'PRSA_data_price_LSTM_weights.12-67085.7578.keras', 'PRSA_data_vola28_MLP21_weights.13-0.0008.keras', 'PRSA_data_vola14_MLP28_weights.08-0.0016.keras', 'PRSA_data_at_LSTM28_weights.05-0.5407.keras', 'PRSA_data_at_MLP7_weights.86-0.0883.keras', 'PRSA_data_vola28_LSTM14_weights.01-0.0020.keras', 'PRSA_data_vola21_MLP21_weights.71-0.0010.keras', 'PRSA_data_at_MLP28_weights.09-0.1537.keras', 'PRSA_data_vola7_MLP28_weights.48-0.0030.keras', 'PRSA_data_at_MLP28_weights.12-0.0354.keras', 'PRSA_data_at_LSTM14_weights.04-0.9732.keras', 'PRSA_data_at_LSTM14_weights.15-0.2154.keras', 'PRSA_data_vola14_MLP28_weights.21-0.0013.keras', 'PRSA_data_vola21_MLP14_weights.25-0.0009.keras', 'PRSA_data_vola14_LSTM14_weights.01-0.0027.keras', 
'PRSA_data_at_LSTM21_weights.20-0.0855.keras', 'PRSA_data_vola21_LSTM28_weights.01-0.0039.keras', 'PRSA_data_price_MLP14_weights.02-8059.2510.keras', 'PRSA_data_vola21_MLP7_weights.13-0.0013.keras', 'PRSA_data_vola28_MLP7_weights.04-0.0016.keras', 'PRSA_data_vola21_LSTM21_weights.02-0.0043.keras', 'PRSA_data_price_MLP14_weights.78-683.9230.keras', 'PRSA_data_price_MLP21_weights.19-1002.5925.keras', 'PRSA_data_vola28_MLP21_weights.92-0.0007.keras', 'PRSA_data_price_LSTM28_weights.14-42774.2500.keras', 'PRSA_data_vola21_MLP21_weights.06-0.0010.keras', 'PRSA_data_vola14_LSTM14_weights.03-0.0010.keras', 'PRSA_data_price_LSTM_weights.16-67046.2422.keras', 'PRSA_data_vola7_LSTM28_weights.07-0.0037.keras', 'PRSA_data_vola28_LSTM7_weights.01-0.0017.keras', 'PRSA_data_vola28_LSTM14_weights.02-0.0008.keras', 'PRSA_data_vola7_MLP14_weights.05-0.0022.keras', 'PRSA_data_at_MLP21_weights.03-0.2039.keras', 'PRSA_data_vola14_LSTM28_weights.02-0.0023.keras', 'PRSA_data_at_MLP7_weights.03-13.2212.keras', 'PRSA_data_vola7_LSTM14_weights.04-0.0037.keras', 'PRSA_data_vola14_MLP21_weights.69-0.0010.keras', 'PRSA_data_vola21_LSTM7_weights.18-0.0013.keras', 'PRSA_data_vola7_MLP21_weights.49-0.0025.keras', 'PRSA_data_vola28_MLP28_weights.31-0.0006.keras', 'PRSA_data_price_MLP21_weights.77-841.5739.keras', 'PRSA_data_vola28_MLP14_weights.35-0.0006.keras', 'PRSA_data_vola7_MLP28_weights.26-0.0031.keras', 'PRSA_data_vola7_LSTM14_weights.01-0.0065.keras', 'PRSA_data_price_LSTM14_weights.02-51435.1133.keras', 'PRSA_data_price_LSTM28_weights.20-42714.7695.keras', 'PRSA_data_vola28_LSTM28_weights.16-0.0006.keras', 'PRSA_data_vola14_LSTM28_weights.06-0.0016.keras', 'PRSA_data_vola21_MLP21_weights.13-0.0010.keras', 'PRSA_data_vola28_MLP14_weights.32-0.0006.keras', 'PRSA_data_vola7_MLP28_weights.01-0.0043.keras', 'PRSA_data_vola21_MLP28_weights.08-0.0011.keras', 'PRSA_data_vola21_LSTM28_weights.10-0.0011.keras', 'PRSA_data_vola21_LSTM7_weights.14-0.0012.keras', 
'PRSA_data_vola7_LSTM28_weights.01-0.0049.keras', 'PRSA_data_price_LSTM28_weights.20-42715.6250.keras', 'PRSA_data_vola21_LSTM7_weights.13-0.0012.keras', 'PRSA_data_price_LSTM_weights.05-67155.5078.keras', 'PRSA_data_price_MLP14_weights.14-1073.6493.keras', 'PRSA_data_vola14_LSTM_weights.03-0.0029.keras', 'PRSA_data_vola14_MLP7_weights.07-0.0021.keras', 'PRSA_data_at_MLP28_weights.03-0.0637.keras', 'PRSA_data_vola21_MLP28_weights.03-0.0012.keras', 'PRSA_data_at_MLP28_weights.05-0.1823.keras', 'PRSA_data_at_MLP21_weights.01-0.9762.keras', 'PRSA_data_vola7_LSTM28_weights.15-0.0035.keras', 'PRSA_data_price_LSTM21_weights.08-42992.0703.keras', 'PRSA_data_vola21_MLP21_weights.22-0.0010.keras', 'PRSA_data_at_LSTM28_weights.01-6.0769.keras', 'PRSA_data_vola28_LSTM28_weights.20-0.0006.keras', 'PRSA_data_at_MLP21_weights.01-4.3241.keras', 'PRSA_data_at_LSTM_weights.06-0.8671.keras', 'PRSA_data_price_MLP7_weights.37-1654.4709.keras', 'PRSA_data_at_LSTM_weights.20-0.4626.keras', 'PRSA_data_vola21_MLP28_weights.27-0.0009.keras', 'PRSA_data_at_LSTM_weights.19-0.4729.keras', 'PRSA_data_at_MLP7_weights.24-1.4718.keras', 'PRSA_data_at_MLP7_weights.59-0.4636.keras', 'PRSA_data_vola14_LSTM14_weights.06-0.0005.keras', 'PRSA_data_at_MLP7_weights.14-2.2322.keras', 'PRSA_data_vola7_LSTM14_weights.02-0.0196.keras', 'PRSA_data_at_LSTM14_weights.19-0.3356.keras', 'PRSA_data_price_LSTM28_weights.18-42735.2305.keras', 'PRSA_data_at_LSTM21_weights.04-0.8083.keras', 'PRSA_data_price_LSTM14_weights.12-51335.6523.keras', 'PRSA_data_vola21_LSTM_weights.06-0.0017.keras', 'PRSA_data_price_LSTM28_weights.05-42862.2383.keras', 'PRSA_data_price_MLP7_weights.10-1248.9592.keras', 'PRSA_data_vola21_MLP28_weights.35-0.0009.keras', 'PRSA_data_vola28_LSTM21_weights.03-0.0013.keras', 'PRSA_data_at_LSTM21_weights.03-1.0323.keras', 'PRSA_data_price_MLP14_weights.04-5028.7607.keras', 'PRSA_data_price_LSTM14_weights.08-51374.8945.keras', 'PRSA_data_vola28_MLP14_weights.48-0.0007.keras']
# Predict with the best checkpointed LSTM on the train/validation/test
# splits, flatten the (n, 1) outputs to 1-D, and print each result.
if best21_lstm_vola7 is None:
    print("No se puede realizar predicciones porque el mejor modelo no se ha cargado.")
else:
    train_preds_vola7_LSTM21 = np.squeeze(best21_lstm_vola7.predict(X_train_vola7_lstm_21))
    val_preds_vola7_LSTM21 = np.squeeze(best21_lstm_vola7.predict(X_val_vola7_lstm_21))
    test_preds_vola7_LSTM21 = np.squeeze(best21_lstm_vola7.predict(X_test_vola7_lstm_21))
    print("Predicciones de Entrenamiento:", train_preds_vola7_LSTM21)
    print("Predicciones de validación:", val_preds_vola7_LSTM21)
    print("Predicciones de prueba:", test_preds_vola7_LSTM21)
154/154 [==============================] - 1s 2ms/step
1/1 [==============================] - 0s 12ms/step
1/1 [==============================] - 0s 11ms/step
Predicciones de Entrenamiento: [0.00212656 0.00212656 0.00212656 ... 0.03160746 0.02004464 0.02042018]
Predicciones de validación: [0.01799716 0.02328799 0.02221119 0.02308705 0.02022305 0.02754546
0.02737788 0.02746651 0.01859613 0.02032319 0.02131993 0.0208174
0.01417792 0.01465186 0.01526489 0.00920259 0.00979322 0.01358811
0.01472801 0.01896005 0.01731177]
Predicciones de prueba: [0.01386316 0.01310514 0.01601098 0.01761301 0.01918736 0.01751395
0.0196478 0.01954441 0.01759475 0.01712902 0.00904147 0.01006424
0.0101563 0.01246948 0.01188461 0.02315512 0.02695226 0.03802651
0.03924298 0.036187 0.03816754]
# Plot the last 100 training observations together with the validation and
# test series against the LSTM predictions for each split.
plot_model(data_train_plot_vola21[-100:], data_val_plot_vola21, data_test_plot_vola21, val_preds_vola7_LSTM21, test_preds_vola7_LSTM21, "Predicciones usando Memoria a Corto y Largo Paso (LSTM)")
A continuación realizamos un análisis sobre los residuales y el rendimiento de nuestro modelo con relación al conjunto de Entrenamiento (rendimiento) y al de Prueba (rendimiento y residuales).
Conjunto de datos de Entrenamiento
A continuación se realiza el análisis de los residuales para el conjunto de entrenamiento.
# Residual diagnostics on the training fit: Ljung-Box (autocorrelation) and
# Jarque-Bera (normality) p-values, printed/plotted by diagnostic_plots.
ljung_box_pval_LSTM_train21_vola7, jarque_bera_pval_LSTM_train21_vola7 = diagnostic_plots(y_train_vola21, train_preds_vola7_LSTM21)
Ljung-Box LB Statistic: 102.227037
Ljung-Box p-value: 0.000000
Se rechaza H0: hay autocorrelación en los residuales.
Jarque-Bera p-value: 0.000000
Se rechaza H0: los residuales no siguen una distribución normal.
# Training-set performance metrics plus residual-test p-values (ω = 7, τ = 21).
_row_train21 = 'LSTM Entrenamiento Volatilidad ω = 7 y τ = 21'
metrica_vola7_LSTM_train21 = metricas(y_train_vola21, train_preds_vola7_LSTM21).rename(index={0: _row_train21})
metrica_vola7_LSTM_train21['Ljung-Box p-value'] = ljung_box_pval_LSTM_train21_vola7
metrica_vola7_LSTM_train21['Jarque-Bera p-value'] = jarque_bera_pval_LSTM_train21_vola7
metrica_vola7_LSTM_train21
| SSE | MAPE | MAD | MSD | R2 | Ljung-Box p-value | Jarque-Bera p-value | |
|---|---|---|---|---|---|---|---|
| LSTM Entrenamiento Volatilidad ω = 7 y τ = 21 | 3.1251 | 16.51% | 0.01 | 0.0 | 85.04% | 4.9510e-24 | 0.0 |
Conjunto de datos de Prueba:
# Residual diagnostics on the test set; returns the Ljung-Box (autocorrelation) and Jarque-Bera (normality) p-values.
ljung_box_pvalLSTM21_vola7, jarque_bera_pvalLSTM21_vola7 = evaluate_residuals(data_test_plot_vola21, test_preds_vola7_LSTM21)
Se rechaza H0: hay autocorrelación en los residuales.
No se rechaza H0: los residuales siguen una distribución normal.
# Test-set performance metrics plus residual-test p-values (ω = 7, τ = 21).
_row_test21 = 'LSTM Prueba Volatilidad ω = 7 y τ = 21'
metrica_LSTM_test21_vola7 = metricas(y_test_vola21, test_preds_vola7_LSTM21).rename(index={0: _row_test21})
metrica_LSTM_test21_vola7['Ljung-Box p-value'] = ljung_box_pvalLSTM21_vola7
metrica_LSTM_test21_vola7['Jarque-Bera p-value'] = jarque_bera_pvalLSTM21_vola7
metrica_LSTM_test21_vola7
| SSE | MAPE | MAD | MSD | R2 | Ljung-Box p-value | Jarque-Bera p-value | |
|---|---|---|---|---|---|---|---|
| LSTM Prueba Volatilidad ω = 7 y τ = 21 | 0.0004 | 16.96% | 0.0 | 0.0 | 83.37% | 0.0013 | 0.3839 |
Horizonte de 28 días (\(\tau=28\))#
Indexación de parámetros para cambio de dimensión de X de 2D a 3D.
# Reshape the 2D feature matrices into the 3D (samples, timesteps, features) layout that LSTM layers expect.
X_train_vola7_lstm_28, X_val_vola7_lstm_28, X_test_vola7_lstm_28 = change_dimension_lstm(X_train_vola28, X_val_vola28, X_test_vola28)
Shape of 3D arrays X: (4887, 28, 1) (28, 28, 1) (28, 28, 1)
A continuación, se lleva a cabo la búsqueda de los hiperparámetros que optimizan nuestro modelo utilizando Keras y GridSearch.
from sklearn.metrics import make_scorer, mean_absolute_error
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'  # Suppress TensorFlow info/warning logs.

# Define the LSTM network architecture
def create_lstm_model(optimizer, activation):
    """Build and compile a two-layer LSTM regressor for 28-step univariate inputs.

    Parameters
    ----------
    optimizer : str or keras optimizer
        Passed straight to ``Model.compile``.
    activation : str
        Activation for the two LSTM hidden layers. Fix: the original
        version accepted this argument but never used it, so the grid
        search over 'activation' had no effect on the model.

    Returns
    -------
    A Keras ``Model`` compiled with mean-absolute-error loss.
    """
    input_layer_lstm = Input(shape=(28, 1), dtype='float32')
    # Fix: apply the searched activation (previously silently ignored).
    lstm_layer1 = LSTM(64, activation=activation, return_sequences=True)(input_layer_lstm)
    lstm_layer2 = LSTM(32, activation=activation, return_sequences=False)(lstm_layer1)
    dropout_layer_lstm = Dropout(0.2)(lstm_layer2)
    # Linear head: single-value regression output.
    output_layer_lstm = Dense(1, activation='linear')(dropout_layer_lstm)
    ts_model_lstm = Model(inputs=input_layer_lstm, outputs=output_layer_lstm)
    ts_model_lstm.compile(loss='mean_absolute_error', optimizer=optimizer)
    return ts_model_lstm
# Hyperparameter search space (larger candidate lists kept as comments to bound runtime).
param_grid = {'activation': ['relu'], # candidates to try: ['relu', 'tanh', 'sigmoid']
              'epochs' : [20], #[20, 50, 100, 150]
              'optimizer': ['SGD'] #['SGD', 'RMSprop', 'Adam']
              }
# Grid-search configuration.
# Fix: MAE is an error metric (lower is better). Without greater_is_better=False,
# GridSearchCV MAXIMIZES the scorer and would select the WORST candidate.
# Scores are negated by make_scorer, so best_score_ is now reported as -MAE.
scoring = make_scorer(mean_absolute_error, greater_is_better=False)
# NOTE(review): `build_fn` is deprecated in modern SciKeras (use `model=`) — confirm the wrapper version in use.
model = KerasRegressor(build_fn=create_lstm_model, epochs=10, batch_size=16, verbose=0)
grid = GridSearchCV(estimator=model, param_grid=param_grid, cv=KFold(n_splits=5, shuffle=True), scoring=scoring, n_jobs=-1, verbose=2)
grid_result = grid.fit(X_train_vola7_lstm_28, y_train_vola28)
# Report the winning hyperparameters.
print(f"Mejor función de activación: {grid_result.best_params_['activation']}")
print(f"Mejor número de epocas: {grid_result.best_params_['epochs']}")
print(f"Mejor Longitud de paso μ (optimizer): {grid_result.best_params_['optimizer']}")
print(f"Mejor Puntuación: {grid_result.best_score_}")
Fitting 5 folds for each of 1 candidates, totalling 5 fits
[CV] END ..........activation=relu, epochs=20, optimizer=SGD; total time= 45.6s
[CV] END ..........activation=relu, epochs=20, optimizer=SGD; total time= 46.0s
[CV] END ..........activation=relu, epochs=20, optimizer=SGD; total time= 46.0s
[CV] END ..........activation=relu, epochs=20, optimizer=SGD; total time= 46.1s
[CV] END ..........activation=relu, epochs=20, optimizer=SGD; total time= 46.6s
Mejor función de activación: relu
Mejor número de epocas: 20
Mejor Longitud de paso μ (optimizer): SGD
Mejor Puntuación: 0.02121390635641183
El modelo de predicción de series temporales se basa en una red neuronal recurrente que incluye una capa de entrada que se conecta a una capa LSTM. Esta capa LSTM considera 28 pasos temporales, equivalentes al número de datos históricos. La salida de la LSTM se genera únicamente a partir del último paso temporal. Cada uno de los n pasos temporales definidos en el shape (n,) cuenta con 32 neuronas ocultas.
La cantidad de pasos temporales (timesteps) y 1 indica el número de características (features) por cada paso temporal. Se utiliza return_sequences=True cuando es necesario enviar la secuencia completa de salidas a la capa siguiente, que puede ser otra capa recurrente, para este caso otra capa LSTM.
La salida de LSTM pasa a una capa de exclusión que elimina aleatoriamente el 20%, 40%, 60% y 80% de la entrada antes de pasar a la capa de salida, que tiene una única neurona oculta con una función de activación lineal
La indexación de parámetros de la función build_models_lstm se realiza con base en lo indicado en el numeral 3 del parcial práctico.
# Model grid from item 3 of the practical exam: one variant per (neurons, dropout) pair.
input_shape28 = 28
neurons_list = [10, 100, 1000, 10000]
dropout_rates = [0.2, 0.4, 0.6, 0.8]
# NOTE(review): the printed summaries below all show identical 64/32-unit LSTMs and
# identical param counts, which suggests build_models_lstm ignores neurons_list —
# confirm against its implementation.
models_LSTM28_vola7 = build_models_lstm(input_shape28, neurons_list, dropout_rates, 'SGD')
Model: "model_392"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_393 (InputLayer) [(None, 28, 1)] 0
lstm_376 (LSTM) (None, 28, 64) 16896
lstm_377 (LSTM) (None, 32) 12416
dropout_392 (Dropout) (None, 32) 0
dense_1004 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10 neuronas y dropout 0.2:
Model: "model_392"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_393 (InputLayer) [(None, 28, 1)] 0
lstm_376 (LSTM) (None, 28, 64) 16896
lstm_377 (LSTM) (None, 32) 12416
dropout_392 (Dropout) (None, 32) 0
dense_1004 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_393"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_394 (InputLayer) [(None, 28, 1)] 0
lstm_378 (LSTM) (None, 28, 64) 16896
lstm_379 (LSTM) (None, 32) 12416
dropout_393 (Dropout) (None, 32) 0
dense_1005 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10 neuronas y dropout 0.4:
Model: "model_393"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_394 (InputLayer) [(None, 28, 1)] 0
lstm_378 (LSTM) (None, 28, 64) 16896
lstm_379 (LSTM) (None, 32) 12416
dropout_393 (Dropout) (None, 32) 0
dense_1005 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_394"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_395 (InputLayer) [(None, 28, 1)] 0
lstm_380 (LSTM) (None, 28, 64) 16896
lstm_381 (LSTM) (None, 32) 12416
dropout_394 (Dropout) (None, 32) 0
dense_1006 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10 neuronas y dropout 0.6:
Model: "model_394"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_395 (InputLayer) [(None, 28, 1)] 0
lstm_380 (LSTM) (None, 28, 64) 16896
lstm_381 (LSTM) (None, 32) 12416
dropout_394 (Dropout) (None, 32) 0
dense_1006 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_395"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_396 (InputLayer) [(None, 28, 1)] 0
lstm_382 (LSTM) (None, 28, 64) 16896
lstm_383 (LSTM) (None, 32) 12416
dropout_395 (Dropout) (None, 32) 0
dense_1007 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10 neuronas y dropout 0.8:
Model: "model_395"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_396 (InputLayer) [(None, 28, 1)] 0
lstm_382 (LSTM) (None, 28, 64) 16896
lstm_383 (LSTM) (None, 32) 12416
dropout_395 (Dropout) (None, 32) 0
dense_1007 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_396"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_397 (InputLayer) [(None, 28, 1)] 0
lstm_384 (LSTM) (None, 28, 64) 16896
lstm_385 (LSTM) (None, 32) 12416
dropout_396 (Dropout) (None, 32) 0
dense_1008 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 100 neuronas y dropout 0.2:
Model: "model_396"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_397 (InputLayer) [(None, 28, 1)] 0
lstm_384 (LSTM) (None, 28, 64) 16896
lstm_385 (LSTM) (None, 32) 12416
dropout_396 (Dropout) (None, 32) 0
dense_1008 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_397"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_398 (InputLayer) [(None, 28, 1)] 0
lstm_386 (LSTM) (None, 28, 64) 16896
lstm_387 (LSTM) (None, 32) 12416
dropout_397 (Dropout) (None, 32) 0
dense_1009 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 100 neuronas y dropout 0.4:
Model: "model_397"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_398 (InputLayer) [(None, 28, 1)] 0
lstm_386 (LSTM) (None, 28, 64) 16896
lstm_387 (LSTM) (None, 32) 12416
dropout_397 (Dropout) (None, 32) 0
dense_1009 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_398"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_399 (InputLayer) [(None, 28, 1)] 0
lstm_388 (LSTM) (None, 28, 64) 16896
lstm_389 (LSTM) (None, 32) 12416
dropout_398 (Dropout) (None, 32) 0
dense_1010 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 100 neuronas y dropout 0.6:
Model: "model_398"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_399 (InputLayer) [(None, 28, 1)] 0
lstm_388 (LSTM) (None, 28, 64) 16896
lstm_389 (LSTM) (None, 32) 12416
dropout_398 (Dropout) (None, 32) 0
dense_1010 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_399"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_400 (InputLayer) [(None, 28, 1)] 0
lstm_390 (LSTM) (None, 28, 64) 16896
lstm_391 (LSTM) (None, 32) 12416
dropout_399 (Dropout) (None, 32) 0
dense_1011 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 100 neuronas y dropout 0.8:
Model: "model_399"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_400 (InputLayer) [(None, 28, 1)] 0
lstm_390 (LSTM) (None, 28, 64) 16896
lstm_391 (LSTM) (None, 32) 12416
dropout_399 (Dropout) (None, 32) 0
dense_1011 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_400"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_401 (InputLayer) [(None, 28, 1)] 0
lstm_392 (LSTM) (None, 28, 64) 16896
lstm_393 (LSTM) (None, 32) 12416
dropout_400 (Dropout) (None, 32) 0
dense_1012 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 1000 neuronas y dropout 0.2:
Model: "model_400"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_401 (InputLayer) [(None, 28, 1)] 0
lstm_392 (LSTM) (None, 28, 64) 16896
lstm_393 (LSTM) (None, 32) 12416
dropout_400 (Dropout) (None, 32) 0
dense_1012 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_401"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_402 (InputLayer) [(None, 28, 1)] 0
lstm_394 (LSTM) (None, 28, 64) 16896
lstm_395 (LSTM) (None, 32) 12416
dropout_401 (Dropout) (None, 32) 0
dense_1013 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 1000 neuronas y dropout 0.4:
Model: "model_401"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_402 (InputLayer) [(None, 28, 1)] 0
lstm_394 (LSTM) (None, 28, 64) 16896
lstm_395 (LSTM) (None, 32) 12416
dropout_401 (Dropout) (None, 32) 0
dense_1013 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_402"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_403 (InputLayer) [(None, 28, 1)] 0
lstm_396 (LSTM) (None, 28, 64) 16896
lstm_397 (LSTM) (None, 32) 12416
dropout_402 (Dropout) (None, 32) 0
dense_1014 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 1000 neuronas y dropout 0.6:
Model: "model_402"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_403 (InputLayer) [(None, 28, 1)] 0
lstm_396 (LSTM) (None, 28, 64) 16896
lstm_397 (LSTM) (None, 32) 12416
dropout_402 (Dropout) (None, 32) 0
dense_1014 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_403"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_404 (InputLayer) [(None, 28, 1)] 0
lstm_398 (LSTM) (None, 28, 64) 16896
lstm_399 (LSTM) (None, 32) 12416
dropout_403 (Dropout) (None, 32) 0
dense_1015 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 1000 neuronas y dropout 0.8:
Model: "model_403"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_404 (InputLayer) [(None, 28, 1)] 0
lstm_398 (LSTM) (None, 28, 64) 16896
lstm_399 (LSTM) (None, 32) 12416
dropout_403 (Dropout) (None, 32) 0
dense_1015 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_404"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_405 (InputLayer) [(None, 28, 1)] 0
lstm_400 (LSTM) (None, 28, 64) 16896
lstm_401 (LSTM) (None, 32) 12416
dropout_404 (Dropout) (None, 32) 0
dense_1016 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10000 neuronas y dropout 0.2:
Model: "model_404"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_405 (InputLayer) [(None, 28, 1)] 0
lstm_400 (LSTM) (None, 28, 64) 16896
lstm_401 (LSTM) (None, 32) 12416
dropout_404 (Dropout) (None, 32) 0
dense_1016 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_405"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_406 (InputLayer) [(None, 28, 1)] 0
lstm_402 (LSTM) (None, 28, 64) 16896
lstm_403 (LSTM) (None, 32) 12416
dropout_405 (Dropout) (None, 32) 0
dense_1017 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10000 neuronas y dropout 0.4:
Model: "model_405"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_406 (InputLayer) [(None, 28, 1)] 0
lstm_402 (LSTM) (None, 28, 64) 16896
lstm_403 (LSTM) (None, 32) 12416
dropout_405 (Dropout) (None, 32) 0
dense_1017 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_406"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_407 (InputLayer) [(None, 28, 1)] 0
lstm_404 (LSTM) (None, 28, 64) 16896
lstm_405 (LSTM) (None, 32) 12416
dropout_406 (Dropout) (None, 32) 0
dense_1018 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10000 neuronas y dropout 0.6:
Model: "model_406"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_407 (InputLayer) [(None, 28, 1)] 0
lstm_404 (LSTM) (None, 28, 64) 16896
lstm_405 (LSTM) (None, 32) 12416
dropout_406 (Dropout) (None, 32) 0
dense_1018 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_407"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_408 (InputLayer) [(None, 28, 1)] 0
lstm_406 (LSTM) (None, 28, 64) 16896
lstm_407 (LSTM) (None, 32) 12416
dropout_407 (Dropout) (None, 32) 0
dense_1019 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10000 neuronas y dropout 0.8:
Model: "model_407"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_408 (InputLayer) [(None, 28, 1)] 0
lstm_406 (LSTM) (None, 28, 64) 16896
lstm_407 (LSTM) (None, 32) 12416
dropout_407 (Dropout) (None, 32) 0
dense_1019 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Para entrenar el modelo, se utiliza la función fit() en el objeto del modelo, proporcionándole como argumentos X_train y y_train. El proceso de entrenamiento se lleva a cabo a lo largo de un número específico de épocas. Además, el parámetro batch_size especifica cuántas muestras del conjunto de entrenamiento se usarán en cada iteración de retropropagación (backpropagation).
El conjunto de datos de validación se utiliza para evaluar el rendimiento del modelo al finalizar cada época. Se emplea un objeto ModelCheckpoint que monitorea la función de pérdida en el conjunto de validación y almacena el modelo correspondiente a la época en la que esta función alcanza su valor más bajo.
Se define val_loss como el valor de la función de coste para los datos de validación cruzada y loss como el valor de la función de coste para los datos de entrenamiento. period=1 indica que el modelo se evaluará y potencialmente se guardará después de cada época.
from tensorflow.keras.callbacks import ModelCheckpoint
# Checkpoint path template: the epoch number and validation loss are embedded in the filename.
save_weights = os.path.join('keras_models', 'PRSA_data_vola7_LSTM28_weights.{epoch:02d}-{val_loss:.4f}.keras')
# Keep only the best full model (lowest val_loss), evaluated once per epoch.
save_best28_lstm_vola7 = ModelCheckpoint(save_weights, monitor='val_loss', verbose=2,
save_best_only=True, save_weights_only=False, mode='min', save_freq='epoch');
A continuación se itera sobre cada uno de los modelos del objeto models_LSTM28_vola7.
import os
from joblib import dump, load

history_vola7_LSTM28 = []
# Train (or reload) each candidate model; per-model training history is cached on disk
# so re-running the notebook skips already-completed fits.
for i, model in enumerate(models_LSTM28_vola7):
    filename = f'history_vola7_LSTM28_model_{i}.joblib'
    if os.path.exists(filename):
        # Cached run: the stored object is the plain history dict.
        model_history = load(filename)
        # Fix: interpolate the actual filename (the message previously printed a literal placeholder).
        print(f"El archivo '{filename}' ya existe. Se ha cargado el historial del entrenamiento.")
    else:
        model_history = model.fit(x=X_train_vola7_lstm_28, y=y_train_vola28, batch_size=16, epochs=20,
                                  verbose=2, callbacks=[save_best28_lstm_vola7], validation_data=(X_val_vola7_lstm_28, y_val_vola28),
                                  shuffle=True)
        dump(model_history.history, filename)
        # Fix: same placeholder bug as above — report the real cache filename.
        print(f"El entrenamiento del modelo {i + 1} se ha completado y el historial ha sido guardado en '{filename}'.")
    # Cached entries are already dicts; fresh fits expose the dict via .history.
    history_vola7_LSTM28.append(model_history if isinstance(model_history, dict) else model_history.history)
El archivo 'history_vola7_LSTM28_model_0.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola7_LSTM28_model_1.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola7_LSTM28_model_2.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola7_LSTM28_model_3.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola7_LSTM28_model_4.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola7_LSTM28_model_5.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola7_LSTM28_model_6.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola7_LSTM28_model_7.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola7_LSTM28_model_8.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola7_LSTM28_model_9.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola7_LSTM28_model_10.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola7_LSTM28_model_11.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola7_LSTM28_model_12.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola7_LSTM28_model_13.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola7_LSTM28_model_14.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola7_LSTM28_model_15.joblib' ya existe. Se ha cargado el historial del entrenamiento.
En este caso, el parámetro verbose=2 indica que se mostrará una línea por cada época durante el entrenamiento. Los valores posibles son: 0 para no mostrar información, 1 para visualizar una barra de progreso y 2 para ver un registro por época.
Además, las muestras se mezclan de manera aleatoria antes de cada época, gracias a la opción shuffle=True.
Una vez completado el entrenamiento, se realizan predicciones sobre la volatilidad del precio del Bitcoin utilizando los mejores modelos guardados. Las predicciones generadas, que corresponden a la volatilidad escalada, son transformadas inversamente para recuperar los valores originales. Asimismo, se evalúa la calidad del ajuste mediante el uso de métricas de medición como SSE, MAPE, MAD, MSD y R2.
import os
import re
from tensorflow.keras.models import load_model
import numpy as np

# Scan the checkpoint directory and load the model whose filename encodes the
# lowest validation loss: PRSA_data_vola7_LSTM28_weights.<epoch>-<val_loss>.keras
model_dir = 'keras_models'
files = os.listdir(model_dir)
pattern = r"PRSA_data_vola7_LSTM28_weights\.(\d+)-([\d\.]+)\.keras"

best_val_loss = float('inf')
best_model_file = None
best28_lstm_vola7 = None

for file in files:
    match = re.match(pattern, file)
    if match:
        # Group 2 is the val_loss embedded in the filename.
        # (Group 1, the epoch, was extracted but never used — removed.)
        val_loss = float(match.group(2))
        if val_loss < best_val_loss:
            best_val_loss = val_loss
            best_model_file = file

# Load the winning checkpoint, if any file matched the pattern.
if best_model_file:
    best_model_path = os.path.join(model_dir, best_model_file)
    print(f"Cargando el mejor modelo: {best_model_file} con val_loss: {best_val_loss}")
    best28_lstm_vola7 = load_model(best_model_path)
    if best28_lstm_vola7 is None:
        print("Error: No se pudo cargar el mejor modelo.")
else:
    print("No se encontraron archivos de modelos que coincidan con el patrón.")
Cargando el mejor modelo: PRSA_data_vola7_LSTM28_weights.16-0.0035.keras con val_loss: 0.0035
A continuación estimamos las predicciones del modelo LSTM para la época en donde la función de pérdida alcanza su valor más bajo.
# Predictions with the best checkpointed model.
# Fix: the original guarded on best28_lstm_vola7 but called .predict on
# best28_lstm_at — a model from a different section — which is the wrong
# model (or an undefined name) here.
if best28_lstm_vola7 is not None:
    train_preds_vola7_LSTM28 = best28_lstm_vola7.predict(X_train_vola7_lstm_28)
    val_preds_vola7_LSTM28 = best28_lstm_vola7.predict(X_val_vola7_lstm_28)
    test_preds_vola7_LSTM28 = best28_lstm_vola7.predict(X_test_vola7_lstm_28)
    # Drop the trailing singleton dimension: (n, 1) -> (n,)
    train_preds_vola7_LSTM28 = np.squeeze(train_preds_vola7_LSTM28)
    val_preds_vola7_LSTM28 = np.squeeze(val_preds_vola7_LSTM28)
    test_preds_vola7_LSTM28 = np.squeeze(test_preds_vola7_LSTM28)
    # Show the predictions on the scaled target.
    print("Predicciones de Entrenamiento:", train_preds_vola7_LSTM28)
    print("Predicciones de validación:", val_preds_vola7_LSTM28)
    print("Predicciones de prueba:", test_preds_vola7_LSTM28)
else:
    print("No se puede realizar predicciones porque el mejor modelo no se ha cargado.")
153/153 [==============================] - 1s 4ms/step
1/1 [==============================] - 0s 15ms/step
1/1 [==============================] - 0s 26ms/step
Predicciones de Entrenamiento: [-0.16464579 -0.16464579 -0.16464579 ... -0.15521419 -0.15435004
-0.15323317]
Predicciones de validación: [-0.15223265 -0.14996803 -0.14731658 -0.14387465 -0.1399244 -0.13663316
-0.13436258 -0.13323438 -0.13197696 -0.13088 -0.13150823 -0.13393974
-0.13469899 -0.13367212 -0.13190413 -0.13233006 -0.13432562 -0.13689518
-0.13935173 -0.14361513 -0.14895964 -0.15434349 -0.15805078 -0.15969312
-0.15963662 -0.15856397 -0.1554302 -0.15066576]
Predicciones de prueba: [-0.14557588 -0.14251196 -0.1415869 -0.1415354 -0.14236629 -0.14519548
-0.14926851 -0.15327644 -0.15760005 -0.16170609 -0.16421509 -0.16462171
-0.16225874 -0.15851378 -0.15507829 -0.15285218 -0.1508125 -0.14873588
-0.14664638 -0.14565146 -0.14520538 -0.14526379 -0.14600646 -0.14747202
-0.15073383 -0.15503848 -0.15885818 -0.16147268]
# Plot the last 100 training points plus the validation/test series against the LSTM predictions (ω = 7, τ = 28).
plot_model(data_train_plot_vola28[-100:], data_val_plot_vola28, data_test_plot_vola28, val_preds_vola7_LSTM28, test_preds_vola7_LSTM28, "Predicciones usando Memoria a Corto y Largo Paso (LSTM)")
A continuación realizamos un análisis sobre los residuales y rendimiento de nuestro modelo con relación a nuestro conjunto de Entrenamiento(rendimiento) y test (rendimiento y residuales).
Conjunto de datos de Entrenamiento
A continuación se realiza el análisis de los residuales para el conjunto de entrenamiento.
# Residual diagnostics on the training set; returns the Ljung-Box (autocorrelation) and Jarque-Bera (normality) p-values.
ljung_box_pval_LSTM_train28_vola7, jarque_bera_pval_LSTM_train28_vola7 = diagnostic_plots(y_train_vola28, train_preds_vola7_LSTM28)
Ljung-Box LB Statistic: 3355.381660
Ljung-Box p-value: 0.000000
Se rechaza H0: hay autocorrelación en los residuales.
Jarque-Bera p-value: 0.000000
Se rechaza H0: los residuales no siguen una distribución normal.
# Compute the fit metrics (SSE, MAPE, MAD, MSD, R2) for the LSTM training split.
metrica_vola7_LSTM_train28 = metricas(y_train_vola28,train_preds_vola7_LSTM28)
# Relabel the single row with a descriptive index name.
metrica_vola7_LSTM_train28.index = metrica_vola7_LSTM_train28.index.map({0: 'LSTM Entrenamiento Volatilidad ω = 7 y τ = 28'})
# Append the residual-diagnostic p-values as extra columns.
metrica_vola7_LSTM_train28['Ljung-Box p-value'] = pd.Series([ljung_box_pval_LSTM_train28_vola7], index=metrica_vola7_LSTM_train28.index)
metrica_vola7_LSTM_train28['Jarque-Bera p-value'] = pd.Series([jarque_bera_pval_LSTM_train28_vola7], index=metrica_vola7_LSTM_train28.index)
metrica_vola7_LSTM_train28
| SSE | MAPE | MAD | MSD | R2 | Ljung-Box p-value | Jarque-Bera p-value | |
|---|---|---|---|---|---|---|---|
| LSTM Entrenamiento Volatilidad ω = 7 y τ = 28 | 153.8155 | 722.37% | 0.17 | 0.03 | -636.93% | 0.0 | 0.0 |
Conjunto de datos de Prueba:
# Residual diagnostics on the test split (Ljung-Box autocorrelation and Jarque-Bera normality).
ljung_box_pvalLSTM28_vola7, jarque_bera_pvalLSTM28_vola7 = evaluate_residuals(data_test_plot_vola28, test_preds_vola7_LSTM28)
Se rechaza H0: hay autocorrelación en los residuales.
No se rechaza H0: los residuales siguen una distribución normal.
# Compute the fit metrics for the LSTM test split and attach the diagnostic p-values.
metrica_LSTM_test28_vola7 = metricas(y_test_vola28,test_preds_vola7_LSTM28)
metrica_LSTM_test28_vola7.index = metrica_LSTM_test28_vola7.index.map({0: 'LSTM Prueba Volatilidad ω = 7 y τ = 28'})
metrica_LSTM_test28_vola7['Ljung-Box p-value'] = pd.Series([ljung_box_pvalLSTM28_vola7], index=metrica_LSTM_test28_vola7.index)
metrica_LSTM_test28_vola7['Jarque-Bera p-value'] = pd.Series([jarque_bera_pvalLSTM28_vola7], index=metrica_LSTM_test28_vola7.index)
metrica_LSTM_test28_vola7
| SSE | MAPE | MAD | MSD | R2 | Ljung-Box p-value | Jarque-Bera p-value | |
|---|---|---|---|---|---|---|---|
| LSTM Prueba Volatilidad ω = 7 y τ = 28 | 0.7755 | 1222.52% | 0.17 | 0.03 | -164359.56% | 6.6758e-06 | 0.3047 |
Curva Runs vs Error/Score :
# Plot the epoch-vs-validation-loss curve for the LSTM training history.
plot_best_model_validation_loss(history_vola7_LSTM28)
Boxplot Errores Conjunto de Entrenamiento, Validación y Prueba :
# Boxplots of the residuals for the train / validation / test splits.
errores_plots(y_train_vola28, train_preds_vola7_LSTM28, y_val_vola28, val_preds_vola7_LSTM28, y_test_vola28, test_preds_vola7_LSTM28)
De acuerdo con los resultados obtenidos del gráfico de Run vs Error/Score y al compararlo con el gráfico boxplot de los residuales se observa lo siguiente:
Conjunto de Entrenamiento: La media de los residuales se encuentra alejada considerablemente del error/score correspondiente al val_loss de la época del mejor modelo.
Conjunto de Validación: La media de los residuales se encuentra muy cercana al error/score correspondiente al val_loss de la época del mejor modelo.
Conjunto de Prueba: La media de los residuales se encuentra alejada considerablemente del error/score correspondiente al val_loss de la época del mejor modelo.
Lo anterior es coherente con las métricas obtenidas para el modelo implementado.
Volatilidad ω = 14 (Volatilidad_14): Perceptrones Multicapa (MLP)#
Horizonte de 7 días (\(\tau=7\))#
A continuación, se lleva a cabo la búsqueda de los hiperparámetros que optimizan nuestro modelo utilizando Keras y GridSearch.
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # Suppress TensorFlow info and warning logs.
def create_mlp_model(activation='tanh',learning_rate=0.001):
    """Build and compile a small MLP for a 7-feature input window.

    Parameters
    ----------
    activation : str
        Activation function used by the three hidden Dense layers.
    learning_rate : float
        Step size for the (legacy) Adam optimizer.

    Returns
    -------
    keras.Model
        Compiled model: MAE loss, single linear output unit.
    """
    inputs = Input(shape=(7,), dtype='float32')  # input layer
    # Stack the hidden layers functionally: 32 -> 16 -> 16 units.
    x = inputs
    for units in (32, 16, 16):
        x = Dense(units, activation=activation)(x)
    x = Dropout(0.2)(x)  # dropout for regularization
    outputs = Dense(1, activation='linear')(x)  # output layer
    mlp = Model(inputs=inputs, outputs=outputs)
    mlp.compile(loss='mean_absolute_error',
                optimizer=tf.keras.optimizers.legacy.Adam(learning_rate=learning_rate))
    return mlp
# Grid Search configuration: wrap the Keras builder so sklearn can drive it.
model = KerasRegressor(build_fn=create_mlp_model, epochs=20, batch_size=16, verbose=0)
param_grid = {
    'activation': [ 'tanh'], # activation functions to try, e.g. ['relu', 'tanh', 'sigmoid']
    'epochs' : [20], # epochs to try, e.g. [20, 50, 100, 200, 300]
    'learning_rate' : [0.001] # learning rates to try, e.g. [0.001, 0.01, 0.1, 0.2, 0.3]
}
# 5-fold shuffled CV over the (single-candidate) grid, using all cores.
grid = GridSearchCV(estimator=model, param_grid=param_grid, n_jobs=-1, cv=KFold(n_splits=5, shuffle=True), verbose =2)
# Fit the Grid Search on the training split.
grid_result = grid.fit(X_train_vola14_7, y_train_vola14_7)
# Report the best hyperparameters found.
print(f"Mejor función de activación: {grid_result.best_params_['activation']}")
print(f"Mejor número de epocas: {grid_result.best_params_['epochs']}")
print(f"Mejor Longitud de paso μ (Learning Rate): {grid_result.best_params_['learning_rate']}")
print(f"Mejor Puntuación: {grid_result.best_score_}")
Fitting 5 folds for each of 1 candidates, totalling 5 fits
[CV] END ....activation=tanh, epochs=20, learning_rate=0.001; total time= 3.7s
[CV] END ....activation=tanh, epochs=20, learning_rate=0.001; total time= 3.6s
[CV] END ....activation=tanh, epochs=20, learning_rate=0.001; total time= 3.7s
[CV] END ....activation=tanh, epochs=20, learning_rate=0.001; total time= 3.8s
[CV] END ....activation=tanh, epochs=20, learning_rate=0.001; total time= 4.0s
Mejor función de activación: tanh
Mejor número de epocas: 20
Mejor Longitud de paso μ (Learning Rate): 0.001
Mejor Puntuación: -0.004182608053088188
Indexación de parámetros de la función build_models con base en lo indicado en el numeral 3 del parcial práctico.
# Hyperparameter grids for the candidate MLPs (4 x 4 = 16 models, 'relu' activation).
input_shape7 = 7
neurons_list = [10, 100, 1000, 10000]
dropout_rates = [0.2, 0.4, 0.6, 0.8]
models_MLP7_vola14 = build_models_mlp(input_shape7, neurons_list, dropout_rates, 'relu')
Modelo con 10 neuronas y dropout 0.2:
Model: "model_409"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_410 (InputLayer) [(None, 7)] 0
dense_1024 (Dense) (None, 32) 256
dense_1025 (Dense) (None, 16) 528
dense_1026 (Dense) (None, 16) 272
dropout_409 (Dropout) (None, 16) 0
dense_1027 (Dense) (None, 1) 17
=================================================================
Total params: 1,073
Trainable params: 1,073
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10 neuronas y dropout 0.4:
Model: "model_410"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_411 (InputLayer) [(None, 7)] 0
dense_1028 (Dense) (None, 32) 256
dense_1029 (Dense) (None, 16) 528
dense_1030 (Dense) (None, 16) 272
dropout_410 (Dropout) (None, 16) 0
dense_1031 (Dense) (None, 1) 17
=================================================================
Total params: 1,073
Trainable params: 1,073
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10 neuronas y dropout 0.6:
Model: "model_411"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_412 (InputLayer) [(None, 7)] 0
dense_1032 (Dense) (None, 32) 256
dense_1033 (Dense) (None, 16) 528
dense_1034 (Dense) (None, 16) 272
dropout_411 (Dropout) (None, 16) 0
dense_1035 (Dense) (None, 1) 17
=================================================================
Total params: 1,073
Trainable params: 1,073
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10 neuronas y dropout 0.8:
Model: "model_412"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_413 (InputLayer) [(None, 7)] 0
dense_1036 (Dense) (None, 32) 256
dense_1037 (Dense) (None, 16) 528
dense_1038 (Dense) (None, 16) 272
dropout_412 (Dropout) (None, 16) 0
dense_1039 (Dense) (None, 1) 17
=================================================================
Total params: 1,073
Trainable params: 1,073
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 100 neuronas y dropout 0.2:
Model: "model_413"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_414 (InputLayer) [(None, 7)] 0
dense_1040 (Dense) (None, 32) 256
dense_1041 (Dense) (None, 16) 528
dense_1042 (Dense) (None, 16) 272
dropout_413 (Dropout) (None, 16) 0
dense_1043 (Dense) (None, 1) 17
=================================================================
Total params: 1,073
Trainable params: 1,073
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 100 neuronas y dropout 0.4:
Model: "model_414"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_415 (InputLayer) [(None, 7)] 0
dense_1044 (Dense) (None, 32) 256
dense_1045 (Dense) (None, 16) 528
dense_1046 (Dense) (None, 16) 272
dropout_414 (Dropout) (None, 16) 0
dense_1047 (Dense) (None, 1) 17
=================================================================
Total params: 1,073
Trainable params: 1,073
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 100 neuronas y dropout 0.6:
Model: "model_415"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_416 (InputLayer) [(None, 7)] 0
dense_1048 (Dense) (None, 32) 256
dense_1049 (Dense) (None, 16) 528
dense_1050 (Dense) (None, 16) 272
dropout_415 (Dropout) (None, 16) 0
dense_1051 (Dense) (None, 1) 17
=================================================================
Total params: 1,073
Trainable params: 1,073
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 100 neuronas y dropout 0.8:
Model: "model_416"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_417 (InputLayer) [(None, 7)] 0
dense_1052 (Dense) (None, 32) 256
dense_1053 (Dense) (None, 16) 528
dense_1054 (Dense) (None, 16) 272
dropout_416 (Dropout) (None, 16) 0
dense_1055 (Dense) (None, 1) 17
=================================================================
Total params: 1,073
Trainable params: 1,073
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 1000 neuronas y dropout 0.2:
Model: "model_417"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_418 (InputLayer) [(None, 7)] 0
dense_1056 (Dense) (None, 32) 256
dense_1057 (Dense) (None, 16) 528
dense_1058 (Dense) (None, 16) 272
dropout_417 (Dropout) (None, 16) 0
dense_1059 (Dense) (None, 1) 17
=================================================================
Total params: 1,073
Trainable params: 1,073
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 1000 neuronas y dropout 0.4:
Model: "model_418"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_419 (InputLayer) [(None, 7)] 0
dense_1060 (Dense) (None, 32) 256
dense_1061 (Dense) (None, 16) 528
dense_1062 (Dense) (None, 16) 272
dropout_418 (Dropout) (None, 16) 0
dense_1063 (Dense) (None, 1) 17
=================================================================
Total params: 1,073
Trainable params: 1,073
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 1000 neuronas y dropout 0.6:
Model: "model_419"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_420 (InputLayer) [(None, 7)] 0
dense_1064 (Dense) (None, 32) 256
dense_1065 (Dense) (None, 16) 528
dense_1066 (Dense) (None, 16) 272
dropout_419 (Dropout) (None, 16) 0
dense_1067 (Dense) (None, 1) 17
=================================================================
Total params: 1,073
Trainable params: 1,073
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 1000 neuronas y dropout 0.8:
Model: "model_420"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_421 (InputLayer) [(None, 7)] 0
dense_1068 (Dense) (None, 32) 256
dense_1069 (Dense) (None, 16) 528
dense_1070 (Dense) (None, 16) 272
dropout_420 (Dropout) (None, 16) 0
dense_1071 (Dense) (None, 1) 17
=================================================================
Total params: 1,073
Trainable params: 1,073
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10000 neuronas y dropout 0.2:
Model: "model_421"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_422 (InputLayer) [(None, 7)] 0
dense_1072 (Dense) (None, 32) 256
dense_1073 (Dense) (None, 16) 528
dense_1074 (Dense) (None, 16) 272
dropout_421 (Dropout) (None, 16) 0
dense_1075 (Dense) (None, 1) 17
=================================================================
Total params: 1,073
Trainable params: 1,073
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10000 neuronas y dropout 0.4:
Model: "model_422"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_423 (InputLayer) [(None, 7)] 0
dense_1076 (Dense) (None, 32) 256
dense_1077 (Dense) (None, 16) 528
dense_1078 (Dense) (None, 16) 272
dropout_422 (Dropout) (None, 16) 0
dense_1079 (Dense) (None, 1) 17
=================================================================
Total params: 1,073
Trainable params: 1,073
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10000 neuronas y dropout 0.6:
Model: "model_423"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_424 (InputLayer) [(None, 7)] 0
dense_1080 (Dense) (None, 32) 256
dense_1081 (Dense) (None, 16) 528
dense_1082 (Dense) (None, 16) 272
dropout_423 (Dropout) (None, 16) 0
dense_1083 (Dense) (None, 1) 17
=================================================================
Total params: 1,073
Trainable params: 1,073
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10000 neuronas y dropout 0.8:
Model: "model_424"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_425 (InputLayer) [(None, 7)] 0
dense_1084 (Dense) (None, 32) 256
dense_1085 (Dense) (None, 16) 528
dense_1086 (Dense) (None, 16) 272
dropout_424 (Dropout) (None, 16) 0
dense_1087 (Dense) (None, 1) 17
=================================================================
Total params: 1,073
Trainable params: 1,073
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Para entrenar el modelo, se utiliza la función fit() en el objeto del modelo, proporcionándole como argumentos X_train y y_train. El proceso de entrenamiento se lleva a cabo a lo largo de un número específico de épocas. Además, el parámetro batch_size especifica cuántas muestras del conjunto de entrenamiento se usarán en cada iteración de retropropagación (backpropagation).
El conjunto de datos de validación se utiliza para evaluar el rendimiento del modelo al finalizar cada época. Se emplea un objeto ModelCheckpoint que monitorea la función de pérdida en el conjunto de validación y almacena el modelo correspondiente a la época en la que esta función alcanza su valor más bajo.
Se define val_loss como el valor de la función de coste para los datos de validación cruzada y loss como el valor de la función de coste para los datos de entrenamiento. period=1 indica que el modelo se evaluará y potencialmente se guardará después de cada época.
from tensorflow.keras.callbacks import ModelCheckpoint
# Checkpoint path template: epoch number and validation loss are embedded in the filename.
save_weights = os.path.join('keras_models', 'PRSA_data_vola14_MLP7_weights.{epoch:02d}-{val_loss:.4f}.keras')
# Keep only the checkpoint of the epoch with the lowest validation loss.
save_best7_vola14 = ModelCheckpoint(save_weights, monitor='val_loss', verbose=2,
                                    save_best_only=True, save_weights_only=False, mode='min', save_freq='epoch');
A continuación se itera sobre cada uno de los modelos del objeto models_MLP7_vola14.
import os
from joblib import dump, load

history_vola14_MPL7 = []
# Train (or reload) each candidate MLP. Histories are cached on disk so the
# notebook can be re-run without retraining.
for i, model in enumerate(models_MLP7_vola14):
    filename = f'history_vola14_MPL7_model_{i}.joblib'
    if os.path.exists(filename):
        # Cached run: load the stored history dict instead of refitting.
        model_history = load(filename)
        # Fix: the message must interpolate the actual filename (the recorded
        # output shows e.g. 'history_vola14_MPL7_model_0.joblib', not a placeholder).
        print(f"El archivo '{filename}' ya existe. Se ha cargado el historial del entrenamiento.")
    else:
        model_history = model.fit(x=X_train_vola14_7, y=y_train_vola14_7, batch_size=16, epochs=20,
                                  verbose=2, callbacks=[save_best7_vola14], validation_data=(X_val_vola14_7, y_val_vola14_7),
                                  shuffle=True)
        dump(model_history.history, filename)
        print(f"El entrenamiento del modelo {i + 1} se ha completado y el historial ha sido guardado en '{filename}'.")
    # Normalize before storing: load() yields a plain dict, fit() a History object.
    history_vola14_MPL7.append(model_history if isinstance(model_history, dict) else model_history.history)
El archivo 'history_vola14_MPL7_model_0.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola14_MPL7_model_1.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola14_MPL7_model_2.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola14_MPL7_model_3.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola14_MPL7_model_4.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola14_MPL7_model_5.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola14_MPL7_model_6.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola14_MPL7_model_7.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola14_MPL7_model_8.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola14_MPL7_model_9.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola14_MPL7_model_10.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola14_MPL7_model_11.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola14_MPL7_model_12.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola14_MPL7_model_13.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola14_MPL7_model_14.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola14_MPL7_model_15.joblib' ya existe. Se ha cargado el historial del entrenamiento.
En este caso, el parámetro verbose=2 indica que se mostrará una línea por cada época durante el entrenamiento. Los valores posibles son: 0 para no mostrar información, 1 para visualizar una barra de progreso y 2 para ver un registro por época.
Además, las muestras se mezclan de manera aleatoria antes de cada época, gracias a la opción shuffle=True.
Una vez completado el entrenamiento, se realizan predicciones sobre el Precio del Bitcoin utilizando los mejores modelos guardados. Las predicciones generadas, que corresponden al Precio del Bitcoin escalado, son transformadas inversamente para recuperar los valores originales. Asimismo, se evalúa la calidad del ajuste mediante el uso de métricas de medición como SSE, MAPE, MAD, MSD y R2.
import os
import re
from tensorflow.keras.models import load_model
import numpy as np

# Directory holding the checkpointed models.
model_dir = 'keras_models'
files = os.listdir(model_dir)
# Checkpoint filenames embed epoch and validation loss:
# PRSA_data_vola14_MLP7_weights.<epoch>-<val_loss>.keras
pattern = r"PRSA_data_vola14_MLP7_weights\.(\d+)-([\d\.]+)\.keras"
best_val_loss = float('inf')
best_model_file = None
best_model7_vola14 = None
# Scan the directory and keep the file with the lowest val_loss.
# (Fix: the epoch capture group was extracted but never used, so it is no
# longer bound to a local variable.)
for file in files:
    match = re.match(pattern, file)
    if match:
        val_loss = float(match.group(2))  # parse val_loss from the filename
        if val_loss < best_val_loss:
            best_val_loss = val_loss
            best_model_file = file  # remember the best checkpoint so far
# Load the best model, if any checkpoint matched the pattern.
if best_model_file:
    best_model_path = os.path.join(model_dir, best_model_file)
    print(f"Cargando el mejor modelo: {best_model_file} con val_loss: {best_val_loss}")
    best_model7_vola14 = load_model(best_model_path)  # load the Keras model
    if best_model7_vola14 is None:
        print("Error: No se pudo cargar el mejor modelo.")
else:
    print("No se encontraron archivos de modelos que coincidan con el patrón.")
Cargando el mejor modelo: PRSA_data_vola14_MLP7_weights.18-0.0015.keras con val_loss: 0.0015
A continuación estimamos las predicciones del modelo MLP para la época en donde la función de pérdida alcanza su valor más bajo.
# Predictions with the best checkpointed model.
if best_model7_vola14 is not None:
    train_preds_vola14_MLP7 = best_model7_vola14.predict(X_train_vola14_7)
    val_preds_vola14_MLP7 = best_model7_vola14.predict(X_val_vola14_7)
    test_preds_vola14_MLP7 = best_model7_vola14.predict(X_test_vola14_7)
    # Drop the trailing singleton dimension so each result is a 1-D array.
    train_preds_vola14_MLP7 = np.squeeze(train_preds_vola14_MLP7)
    val_preds_vola14_MLP7 = np.squeeze(val_preds_vola14_MLP7)
    test_preds_vola14_MLP7 = np.squeeze(test_preds_vola14_MLP7)
    # Print the predictions.
    print("Predicciones de Entrenamiento:", train_preds_vola14_MLP7)
    print("Predicciones de validación:", val_preds_vola14_MLP7)
    print("Predicciones de prueba:", test_preds_vola14_MLP7)
else:
    print("No se puede realizar predicciones porque el mejor modelo no se ha cargado.")
156/156 [==============================] - 0s 234us/step
1/1 [==============================] - 0s 9ms/step
1/1 [==============================] - 0s 12ms/step
Predicciones de Entrenamiento: [0.00764726 0.00764726 0.00764726 ... 0.03479 0.03484715 0.03467152]
Predicciones de validación: [0.03446774 0.03602793 0.04187463 0.04201755 0.0411687 0.04030391
0.04022796]
Predicciones de prueba: [0.03987313 0.0395385 0.03954877 0.03516226 0.0350638 0.03558356
0.03882359]
A continuación se indexan los parámetros sobre la función data_plot() para un horizonte de 7 días (\(\tau = 7\)).
# Split the Volatilidad_14 series into train/validation/test plotting segments (7-day horizon).
data_train_plot_vola14_7, data_val_plot_vola14_7, data_test_plot_vola14_7 = data_plot(df_1_st['Volatilidad_14'], 7)
Datos de entrenamiento:
4998 0.0000
4997 0.0000
4996 0.0000
4995 0.0000
4994 0.0000
...
18 0.0424
17 0.0416
16 0.0406
15 0.0409
14 0.0408
Name: Volatilidad_14, Length: 4985, dtype: float647
Datos de validación:
13 0.0403
12 0.0403
11 0.0337
10 0.0338
9 0.0352
8 0.0395
7 0.0410
Name: Volatilidad_14, dtype: float647
Datos de prueba:
6 0.0351
5 0.0376
4 0.0445
3 0.0455
2 0.0456
1 0.0456
0 0.0478
Name: Volatilidad_14, dtype: float647
# Plot the last 100 training points plus validation/test series against the MLP predictions.
plot_model(data_train_plot_vola14_7[-100:], data_val_plot_vola14_7, data_test_plot_vola14_7, val_preds_vola14_MLP7, test_preds_vola14_MLP7, "Predicciones usando Perceptrón Multicapa (MLP)")
A continuación realizamos un análisis sobre los residuales y rendimiento de nuestro modelo con relación a nuestro conjunto de Entrenamiento(rendimiento) y test (rendimiento y residuales).
Conjunto de datos de Entrenamiento
A continuación se realiza el análisis de los residuales para el conjunto de entrenamiento.
# Residual diagnostics on the training split: returns Ljung-Box and Jarque-Bera p-values.
ljung_box_pval_MLP_train7_vola14, jarque_bera_pval_MLP_train7_vola14 = diagnostic_plots(y_train_vola14_7, train_preds_vola14_MLP7)
Ljung-Box LB Statistic: 1033.132088
Ljung-Box p-value: 0.000000
Se rechaza H0: hay autocorrelación en los residuales.
Jarque-Bera p-value: 0.000000
Se rechaza H0: los residuales no siguen una distribución normal.
# Compute the fit metrics (SSE, MAPE, MAD, MSD, R2) for the MLP training split.
metrica_vola14_MLP_train = metricas(y_train_vola14_7,train_preds_vola14_MLP7)
# Relabel the single row with a descriptive index name.
metrica_vola14_MLP_train.index = metrica_vola14_MLP_train.index.map({0: 'MLP Entrenamiento Volatilidad ω = 14 y τ = 7'})
# Append the residual-diagnostic p-values as extra columns.
metrica_vola14_MLP_train['Ljung-Box p-value'] = pd.Series([ljung_box_pval_MLP_train7_vola14], index=metrica_vola14_MLP_train.index)
metrica_vola14_MLP_train['Jarque-Bera p-value'] = pd.Series([jarque_bera_pval_MLP_train7_vola14], index=metrica_vola14_MLP_train.index)
metrica_vola14_MLP_train
| SSE | MAPE | MAD | MSD | R2 | Ljung-Box p-value | Jarque-Bera p-value | |
|---|---|---|---|---|---|---|---|
| MLP Entrenamiento Volatilidad ω = 14 y τ = 7 | 1.9199 | 16.54% | 0.01 | 0.0 | 90.16% | 1.1289e-226 | 0.0 |
Conjunto de datos de Prueba:
# Residual diagnostics on the test split (Ljung-Box autocorrelation and Jarque-Bera normality).
ljung_box_pvalMLP7_vola14, jarque_bera_pvalMLP7_vola14 = evaluate_residuals(data_test_plot_vola14_7, test_preds_vola14_MLP7)
No se rechaza H0: los residuales son independientes (no correlacionados).
No se rechaza H0: los residuales siguen una distribución normal.
# Compute the fit metrics for the MLP test split and attach the diagnostic p-values.
metrica_MLP7_test_vola14 = metricas(y_test_vola14_7,test_preds_vola14_MLP7)
metrica_MLP7_test_vola14.index = metrica_MLP7_test_vola14.index.map({0: 'MLP Prueba Volatilidad ω = 14 y τ = 7'})
metrica_MLP7_test_vola14['Ljung-Box p-value'] = pd.Series([ljung_box_pvalMLP7_vola14], index=metrica_MLP7_test_vola14.index)
metrica_MLP7_test_vola14['Jarque-Bera p-value'] = pd.Series([jarque_bera_pvalMLP7_vola14], index=metrica_MLP7_test_vola14.index)
metrica_MLP7_test_vola14
| SSE | MAPE | MAD | MSD | R2 | Ljung-Box p-value | Jarque-Bera p-value | |
|---|---|---|---|---|---|---|---|
| MLP Prueba Volatilidad ω = 14 y τ = 7 | 5.8058e-05 | 5.78% | 0.0 | 0.0 | 12.02% | 0.0597 | 0.5949 |
Curva Runs vs Error/Score :
# Plot the epoch-vs-validation-loss curves for the MLP training histories.
plot_best_model_validation_loss(history_vola14_MPL7)
Boxplot Errores Conjunto de Entrenamiento, Validación y Prueba :
# Boxplots of the residuals for the train / validation / test splits.
errores_plots(y_train_vola14_7, train_preds_vola14_MLP7, y_val_vola14_7, val_preds_vola14_MLP7, y_test_vola14_7, test_preds_vola14_MLP7)
De acuerdo con los resultados obtenidos del gráfico de Run vs Error/Score y al compararlo con el gráfico boxplot de los residuales se observa lo siguiente:
Conjunto de Entrenamiento: La media de los residuales se encuentra muy cercana al error/score correspondiente al val_loss de la época del mejor modelo.
Conjunto de Validación: La media de los residuales se encuentra muy cercana al error/score correspondiente al val_loss de la época del mejor modelo.
Conjunto de Prueba: La media de los residuales se encuentra alejada del error/score correspondiente al val_loss de la época del mejor modelo.
Lo anterior es coherente con las métricas obtenidas para el modelo implementado.
Horizonte de 14 días (\(\tau=14\))#
A continuación, se lleva a cabo la búsqueda de los hiperparámetros que optimizan nuestro modelo utilizando Keras y GridSearch.
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # Suppress TensorFlow info and warning logs.
def create_mlp_model(activation='tanh',learning_rate=0.001):
    """Build and compile the MLP for a 14-step input window.

    Args:
        activation: activation used by the three hidden Dense layers.
        learning_rate: learning rate for the (legacy) Adam optimizer.

    Returns:
        A compiled Keras Model with MAE loss and one linear output unit.
    """
    feats = Input(shape=(14,), dtype='float32')  # input layer
    # Hidden stack 32 -> 16 -> 16, expressed as nested functional calls.
    hidden = Dense(16, activation=activation)(
        Dense(16, activation=activation)(
            Dense(32, activation=activation)(feats)))
    regularized = Dropout(0.2)(hidden)  # dropout for regularization
    prediction = Dense(1, activation='linear')(regularized)  # output layer
    net = Model(inputs=feats, outputs=prediction)
    net.compile(loss='mean_absolute_error',
                optimizer=tf.keras.optimizers.legacy.Adam(learning_rate=learning_rate))
    return net
# Grid Search configuration: wrap the Keras builder so sklearn can drive it.
model = KerasRegressor(build_fn=create_mlp_model, epochs=20, batch_size=16, verbose=0)
param_grid = {
    'activation': ['tanh'], # activation functions to try, e.g. ['relu', 'tanh', 'sigmoid']
    'epochs' : [50], # epochs to try, e.g. [20, 50, 100, 200, 300]
    'learning_rate' : [0.001] # learning rates to try, e.g. [0.001, 0.01, 0.1, 0.2, 0.3]
}
# 5-fold shuffled CV over the (single-candidate) grid, using all cores.
grid = GridSearchCV(estimator=model, param_grid=param_grid, n_jobs=-1, cv=KFold(n_splits=5, shuffle=True), verbose =2)
# Fit the Grid Search on the training split.
grid_result = grid.fit(X_train_vola14_14, y_train_vola14_14)
# Report the best hyperparameters found.
print(f"Mejor función de activación: {grid_result.best_params_['activation']}")
print(f"Mejor número de epocas: {grid_result.best_params_['epochs']}")
print(f"Mejor Longitud de paso μ (Learning Rate): {grid_result.best_params_['learning_rate']}")
print(f"Mejor Puntuación: {grid_result.best_score_}")
Fitting 5 folds for each of 1 candidates, totalling 5 fits
[CV] END ....activation=tanh, epochs=50, learning_rate=0.001; total time= 8.7s
[CV] END ....activation=tanh, epochs=50, learning_rate=0.001; total time= 8.9s
[CV] END ....activation=tanh, epochs=50, learning_rate=0.001; total time= 8.9s
[CV] END ....activation=tanh, epochs=50, learning_rate=0.001; total time= 9.1s
[CV] END ....activation=tanh, epochs=50, learning_rate=0.001; total time= 9.2s
Mejor función de activación: tanh
Mejor número de epocas: 50
Mejor Longitud de paso μ (Learning Rate): 0.001
Mejor Puntuación: -0.004547589644789696
A continuación, configuramos la red MLP empleando la API funcional de Keras. Con este método, una capa puede ser designada como la entrada para la siguiente capa en el momento de su definición.
En este contexto, Input es una función utilizada para establecer una capa de entrada en un modelo de red neuronal. La propiedad shape=(14,) define la estructura de los datos de entrada, lo que indica que estos tendrán 14 dimensiones. Por otro lado, dtype='float32' especifica que los elementos de esta capa serán números de punto flotante de 32 bits
A continuación se define la función build_models para la generación de modelos de acuerdo a los parámetros indexados a través de la lista.
Indexación de parámetros de la función build_models_mlp con base en lo indicado en el numeral 3 del parcial práctico.
# Hyperparameter candidates required by item 3 of the practical exam.
neurons_list = [10, 100, 1000, 10000]   # hidden-layer sizes to try
dropout_rates = [0.2, 0.4, 0.6, 0.8]    # dropout probabilities to try
input_shape14 = 14                      # 14-day input window
# Build one model per (neurons, dropout) combination with tanh activation.
models_MLP14_vola14 = build_models_mlp(input_shape14, neurons_list, dropout_rates, 'tanh')
Modelo con 10 neuronas y dropout 0.2:
Model: "model_426"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_427 (InputLayer) [(None, 14)] 0
dense_1092 (Dense) (None, 32) 480
dense_1093 (Dense) (None, 16) 528
dense_1094 (Dense) (None, 16) 272
dropout_426 (Dropout) (None, 16) 0
dense_1095 (Dense) (None, 1) 17
=================================================================
Total params: 1,297
Trainable params: 1,297
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10 neuronas y dropout 0.4:
Model: "model_427"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_428 (InputLayer) [(None, 14)] 0
dense_1096 (Dense) (None, 32) 480
dense_1097 (Dense) (None, 16) 528
dense_1098 (Dense) (None, 16) 272
dropout_427 (Dropout) (None, 16) 0
dense_1099 (Dense) (None, 1) 17
=================================================================
Total params: 1,297
Trainable params: 1,297
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10 neuronas y dropout 0.6:
Model: "model_428"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_429 (InputLayer) [(None, 14)] 0
dense_1100 (Dense) (None, 32) 480
dense_1101 (Dense) (None, 16) 528
dense_1102 (Dense) (None, 16) 272
dropout_428 (Dropout) (None, 16) 0
dense_1103 (Dense) (None, 1) 17
=================================================================
Total params: 1,297
Trainable params: 1,297
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10 neuronas y dropout 0.8:
Model: "model_429"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_430 (InputLayer) [(None, 14)] 0
dense_1104 (Dense) (None, 32) 480
dense_1105 (Dense) (None, 16) 528
dense_1106 (Dense) (None, 16) 272
dropout_429 (Dropout) (None, 16) 0
dense_1107 (Dense) (None, 1) 17
=================================================================
Total params: 1,297
Trainable params: 1,297
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 100 neuronas y dropout 0.2:
Model: "model_430"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_431 (InputLayer) [(None, 14)] 0
dense_1108 (Dense) (None, 32) 480
dense_1109 (Dense) (None, 16) 528
dense_1110 (Dense) (None, 16) 272
dropout_430 (Dropout) (None, 16) 0
dense_1111 (Dense) (None, 1) 17
=================================================================
Total params: 1,297
Trainable params: 1,297
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 100 neuronas y dropout 0.4:
Model: "model_431"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_432 (InputLayer) [(None, 14)] 0
dense_1112 (Dense) (None, 32) 480
dense_1113 (Dense) (None, 16) 528
dense_1114 (Dense) (None, 16) 272
dropout_431 (Dropout) (None, 16) 0
dense_1115 (Dense) (None, 1) 17
=================================================================
Total params: 1,297
Trainable params: 1,297
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 100 neuronas y dropout 0.6:
Model: "model_432"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_433 (InputLayer) [(None, 14)] 0
dense_1116 (Dense) (None, 32) 480
dense_1117 (Dense) (None, 16) 528
dense_1118 (Dense) (None, 16) 272
dropout_432 (Dropout) (None, 16) 0
dense_1119 (Dense) (None, 1) 17
=================================================================
Total params: 1,297
Trainable params: 1,297
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 100 neuronas y dropout 0.8:
Model: "model_433"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_434 (InputLayer) [(None, 14)] 0
dense_1120 (Dense) (None, 32) 480
dense_1121 (Dense) (None, 16) 528
dense_1122 (Dense) (None, 16) 272
dropout_433 (Dropout) (None, 16) 0
dense_1123 (Dense) (None, 1) 17
=================================================================
Total params: 1,297
Trainable params: 1,297
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 1000 neuronas y dropout 0.2:
Model: "model_434"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_435 (InputLayer) [(None, 14)] 0
dense_1124 (Dense) (None, 32) 480
dense_1125 (Dense) (None, 16) 528
dense_1126 (Dense) (None, 16) 272
dropout_434 (Dropout) (None, 16) 0
dense_1127 (Dense) (None, 1) 17
=================================================================
Total params: 1,297
Trainable params: 1,297
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 1000 neuronas y dropout 0.4:
Model: "model_435"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_436 (InputLayer) [(None, 14)] 0
dense_1128 (Dense) (None, 32) 480
dense_1129 (Dense) (None, 16) 528
dense_1130 (Dense) (None, 16) 272
dropout_435 (Dropout) (None, 16) 0
dense_1131 (Dense) (None, 1) 17
=================================================================
Total params: 1,297
Trainable params: 1,297
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 1000 neuronas y dropout 0.6:
Model: "model_436"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_437 (InputLayer) [(None, 14)] 0
dense_1132 (Dense) (None, 32) 480
dense_1133 (Dense) (None, 16) 528
dense_1134 (Dense) (None, 16) 272
dropout_436 (Dropout) (None, 16) 0
dense_1135 (Dense) (None, 1) 17
=================================================================
Total params: 1,297
Trainable params: 1,297
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 1000 neuronas y dropout 0.8:
Model: "model_437"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_438 (InputLayer) [(None, 14)] 0
dense_1136 (Dense) (None, 32) 480
dense_1137 (Dense) (None, 16) 528
dense_1138 (Dense) (None, 16) 272
dropout_437 (Dropout) (None, 16) 0
dense_1139 (Dense) (None, 1) 17
=================================================================
Total params: 1,297
Trainable params: 1,297
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10000 neuronas y dropout 0.2:
Model: "model_438"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_439 (InputLayer) [(None, 14)] 0
dense_1140 (Dense) (None, 32) 480
dense_1141 (Dense) (None, 16) 528
dense_1142 (Dense) (None, 16) 272
dropout_438 (Dropout) (None, 16) 0
dense_1143 (Dense) (None, 1) 17
=================================================================
Total params: 1,297
Trainable params: 1,297
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10000 neuronas y dropout 0.4:
Model: "model_439"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_440 (InputLayer) [(None, 14)] 0
dense_1144 (Dense) (None, 32) 480
dense_1145 (Dense) (None, 16) 528
dense_1146 (Dense) (None, 16) 272
dropout_439 (Dropout) (None, 16) 0
dense_1147 (Dense) (None, 1) 17
=================================================================
Total params: 1,297
Trainable params: 1,297
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10000 neuronas y dropout 0.6:
Model: "model_440"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_441 (InputLayer) [(None, 14)] 0
dense_1148 (Dense) (None, 32) 480
dense_1149 (Dense) (None, 16) 528
dense_1150 (Dense) (None, 16) 272
dropout_440 (Dropout) (None, 16) 0
dense_1151 (Dense) (None, 1) 17
=================================================================
Total params: 1,297
Trainable params: 1,297
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10000 neuronas y dropout 0.8:
Model: "model_441"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_442 (InputLayer) [(None, 14)] 0
dense_1152 (Dense) (None, 32) 480
dense_1153 (Dense) (None, 16) 528
dense_1154 (Dense) (None, 16) 272
dropout_441 (Dropout) (None, 16) 0
dense_1155 (Dense) (None, 1) 17
=================================================================
Total params: 1,297
Trainable params: 1,297
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Para entrenar el modelo, se utiliza la función fit() en el objeto del modelo, proporcionándole como argumentos X_train y y_train. El proceso de entrenamiento se lleva a cabo a lo largo de un número específico de épocas. Además, el parámetro batch_size especifica cuántas muestras del conjunto de entrenamiento se usarán en cada iteración de retropropagación (backpropagation).
El conjunto de datos de validación se utiliza para evaluar el rendimiento del modelo al finalizar cada época. Se emplea un objeto ModelCheckpoint que monitorea la función de pérdida en el conjunto de validación y almacena el modelo correspondiente a la época en la que esta función alcanza su valor más bajo.
Se define val_loss como el valor de la función de coste para los datos de validación cruzada y loss como el valor de la función de coste para los datos de entrenamiento. period=1 indica que el modelo se evaluará y potencialmente se guardará después de cada época.
from tensorflow.keras.callbacks import ModelCheckpoint

# Checkpoint path template: embeds the epoch number and validation loss
# in the saved file name.
save_weights = os.path.join(
    'keras_models',
    'PRSA_data_vola14_MLP14_weights.{epoch:02d}-{val_loss:.4f}.keras',
)
# Evaluate once per epoch and keep only the model with the lowest
# validation loss seen so far (full model, not just weights).
save_best14_vola14 = ModelCheckpoint(
    save_weights,
    monitor='val_loss',
    verbose=2,
    mode='min',
    save_best_only=True,
    save_weights_only=False,
    save_freq='epoch',
)
A continuación se itera sobre cada uno de los modelos del objeto models_MLP14.
import os
from joblib import dump, load

history_vola14_MPL14 = []

# Train (or reload) every candidate model. Each training history is cached
# on disk with joblib so re-running the notebook does not repeat the fits.
for i, model in enumerate(models_MLP14_vola14):
    filename = f'history_vola14_MPL14_model_{i}.joblib'
    if os.path.exists(filename):
        # Cached history found: skip training and load the saved dict.
        model_history = load(filename)
        # FIX: the message previously printed a broken placeholder instead
        # of the actual file name.
        print(f"El archivo '{filename}' ya existe. Se ha cargado el historial del entrenamiento.")
    else:
        model_history = model.fit(x=X_train_vola14_14, y=y_train_vola14_14, batch_size=16, epochs=50,
                                  verbose=2, callbacks=[save_best14_vola14],
                                  validation_data=(X_val_vola14_14, y_val_vola14_14),
                                  shuffle=True)
        dump(model_history.history, filename)
        print(f"El entrenamiento del modelo {i + 1} se ha completado y el historial ha sido guardado en '{filename}'.")
    # Loaded histories are plain dicts; fresh fits return a History object,
    # so normalize to a dict before storing.
    history_vola14_MPL14.append(model_history if isinstance(model_history, dict) else model_history.history)
El archivo 'history_vola14_MPL14_model_0.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola14_MPL14_model_1.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola14_MPL14_model_2.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola14_MPL14_model_3.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola14_MPL14_model_4.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola14_MPL14_model_5.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola14_MPL14_model_6.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola14_MPL14_model_7.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola14_MPL14_model_8.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola14_MPL14_model_9.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola14_MPL14_model_10.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola14_MPL14_model_11.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola14_MPL14_model_12.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola14_MPL14_model_13.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola14_MPL14_model_14.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola14_MPL14_model_15.joblib' ya existe. Se ha cargado el historial del entrenamiento.
En este caso, el parámetro verbose=2 indica que se mostrará una línea por cada época durante el entrenamiento. Los valores posibles son: 0 para no mostrar información, 1 para visualizar una barra de progreso y 2 para ver un registro por época.
Además, las muestras se mezclan de manera aleatoria antes de cada época, gracias a la opción shuffle=True.
Una vez completado el entrenamiento, se realizan predicciones sobre el Precio del Bitcoin utilizando los mejores modelos guardados. Las predicciones generadas, que corresponden al Precio del Bitcoin escalado, son transformadas inversamente para recuperar los valores originales. Asimismo, se evalúa la calidad del ajuste mediante el uso de métricas de medición como SSE, MAPE, MAD, MSD y R2.
import os
import re
from tensorflow.keras.models import load_model
import numpy as np

# Directory where the training checkpoints were written.
model_dir = 'keras_models'
files = os.listdir(model_dir)

# Checkpoint file names encode the epoch and the validation loss.
pattern = r"PRSA_data_vola14_MLP14_weights\.(\d+)-([\d\.]+)\.keras"

best_val_loss = float('inf')
best_model_file = None
best_model14_vola14 = None

# Scan every checkpoint file and remember the one with the smallest val_loss.
for file in files:
    match = re.match(pattern, file)
    if not match:
        continue
    epoch = int(match.group(1))       # epoch number encoded in the file name
    val_loss = float(match.group(2))  # validation loss encoded in the file name
    if val_loss < best_val_loss:
        best_val_loss = val_loss
        best_model_file = file

# Load the winning checkpoint, if any file matched the pattern.
if best_model_file:
    best_model_path = os.path.join(model_dir, best_model_file)
    print(f"Cargando el mejor modelo: {best_model_file} con val_loss: {best_val_loss}")
    best_model14_vola14 = load_model(best_model_path)
    if best_model14_vola14 is None:
        print("Error: No se pudo cargar el mejor modelo.")
else:
    print("No se encontraron archivos de modelos que coincidan con el patrón.")
Cargando el mejor modelo: PRSA_data_vola14_MLP14_weights.29-0.0005.keras con val_loss: 0.0005
A continuación estimamos las predicciones del modelo MLP para la época en donde la función de pérdida alcanza su valor más bajo.
# Generate predictions with the best checkpointed model on the three splits.
if best_model14_vola14 is None:
    print("No se puede realizar predicciones porque el mejor modelo no se ha cargado.")
else:
    train_preds_vola14_MLP14 = best_model14_vola14.predict(X_train_vola14_14)
    val_preds_vola14_MLP14 = best_model14_vola14.predict(X_val_vola14_14)
    test_preds_vola14_MLP14 = best_model14_vola14.predict(X_test_vola14_14)
    # Drop the trailing unit dimension: (n, 1) -> (n,).
    train_preds_vola14_MLP14 = np.squeeze(train_preds_vola14_MLP14)
    val_preds_vola14_MLP14 = np.squeeze(val_preds_vola14_MLP14)
    test_preds_vola14_MLP14 = np.squeeze(test_preds_vola14_MLP14)
    # Show the (scaled) predicted values for each split.
    print("Predicciones de Entrenamiento:", train_preds_vola14_MLP14)
    print("Predicciones de validación:", val_preds_vola14_MLP14)
    print("Predicciones de prueba:", test_preds_vola14_MLP14)
155/155 [==============================] - 0s 194us/step
1/1 [==============================] - 0s 9ms/step
1/1 [==============================] - 0s 12ms/step
Predicciones de Entrenamiento: [0.00022482 0.00022482 0.00022482 ... 0.01751764 0.01638739 0.01642246]
Predicciones de validación: [0.01591108 0.01651501 0.01639629 0.01728163 0.01726819 0.01710023
0.01765272 0.01653662 0.01709314 0.01711379 0.01744539 0.01801313
0.0163977 0.01645755]
Predicciones de prueba: [0.01653196 0.01963498 0.02204968 0.03054131 0.03172324 0.03166819
0.03142552 0.03172671 0.03547365 0.04214239 0.04164523 0.0406197
0.03986698 0.04073464]
A continuación se indexan los parámetros sobre la función data_plot() para un horizonte de 14 días (\(\tau = 14\)).
data_train_plot_vola14_14, data_val_plot_vola14_14, data_test_plot_vola14_14 = data_plot(df_1_st['Volatilidad_14'], 14)
Datos de entrenamiento:
4998 0.0000
4997 0.0000
4996 0.0000
4995 0.0000
4994 0.0000
...
32 0.0173
31 0.0180
30 0.0163
29 0.0164
28 0.0163
Name: Volatilidad_14, Length: 4971, dtype: float6414
Datos de validación:
27 0.0195
26 0.0220
25 0.0307
24 0.0320
23 0.0320
22 0.0318
21 0.0318
20 0.0358
19 0.0426
18 0.0424
17 0.0416
16 0.0406
15 0.0409
14 0.0408
Name: Volatilidad_14, dtype: float6414
Datos de prueba:
13 0.0403
12 0.0403
11 0.0337
10 0.0338
9 0.0352
8 0.0395
7 0.0410
6 0.0351
5 0.0376
4 0.0445
3 0.0455
2 0.0456
1 0.0456
0 0.0478
Name: Volatilidad_14, dtype: float6414
plot_model(data_train_plot_vola14_14[-100:], data_val_plot_vola14_14, data_test_plot_vola14_14, val_preds_vola14_MLP14, test_preds_vola14_MLP14, "Predicciones usando Perceptrón Multicapa (MLP) para un horizonte de 14 días")
A continuación realizamos un análisis sobre los residuales y el rendimiento de nuestro modelo con relación a nuestro conjunto de validación (rendimiento) y de prueba (rendimiento y residuales).
Conjunto de datos de Entrenamiento
A continuación se realiza el análisis de los residuales para el conjunto de entrenamiento.
ljung_box_pval_MLP_train14_vola14, jarque_bera_pval_MLP_train14_vola14 = diagnostic_plots(y_train_vola14_14, train_preds_vola14_MLP14)
Ljung-Box LB Statistic: 552.860958
Ljung-Box p-value: 0.000000
Se rechaza H0: hay autocorrelación en los residuales.
Jarque-Bera p-value: 0.000000
Se rechaza H0: los residuales no siguen una distribución normal.
# Compute fit metrics (SSE, MAPE, MAD, MSD, R2) for the training split and
# append the residual-diagnostic p-values as extra columns.
metrica_vola14_MLP_train14 = metricas(y_train_vola14_14,train_preds_vola14_MLP14)
# Rename row 0 with a descriptive label for the summary table.
metrica_vola14_MLP_train14.index = metrica_vola14_MLP_train14.index.map({0: 'MLP Entrenamiento Volatilidad ω = 14 y τ = 14'})
metrica_vola14_MLP_train14['Ljung-Box p-value'] = pd.Series([ljung_box_pval_MLP_train14_vola14], index=metrica_vola14_MLP_train14.index)
metrica_vola14_MLP_train14['Jarque-Bera p-value'] = pd.Series([jarque_bera_pval_MLP_train14_vola14], index=metrica_vola14_MLP_train14.index)
metrica_vola14_MLP_train14
| SSE | MAPE | MAD | MSD | R2 | Ljung-Box p-value | Jarque-Bera p-value | |
|---|---|---|---|---|---|---|---|
| MLP Entrenamiento Volatilidad ω = 14 y τ = 14 | 1.666 | 7.83% | 0.0 | 0.0 | 91.45% | 3.0034e-122 | 0.0 |
Conjunto de datos de Prueba:
# Residual diagnostics on the test split: Ljung-Box and Jarque-Bera p-values.
ljung_box_pvalMLP14_vola14, jarque_bera_pvalMLP14_vola14 = evaluate_residuals(data_test_plot_vola14_14, test_preds_vola14_MLP14)
# Compute fit metrics for the test split and attach the p-values.
metrica_MLP14_test_vola14= metricas(y_test_vola14_14,test_preds_vola14_MLP14)
# Rename row 0 with a descriptive label for the summary table.
metrica_MLP14_test_vola14.index = metrica_MLP14_test_vola14.index.map({0: 'MLP Prueba Volatilidad ω = 14 τ = 14'})
metrica_MLP14_test_vola14['Ljung-Box p-value'] = pd.Series([ljung_box_pvalMLP14_vola14], index=metrica_MLP14_test_vola14.index)
metrica_MLP14_test_vola14['Jarque-Bera p-value'] = pd.Series([jarque_bera_pvalMLP14_vola14], index=metrica_MLP14_test_vola14.index)
metrica_MLP14_test_vola14
| SSE | MAPE | MAD | MSD | R2 | Ljung-Box p-value | Jarque-Bera p-value | |
|---|---|---|---|---|---|---|---|
| MLP Prueba Volatilidad ω = 14 τ = 14 | 0.0002 | 6.6% | 0.0 | 0.0 | 77.85% | 0.0148 | 0.0687 |
Curva Runs vs Error/Score :
plot_best_model_validation_loss(history_vola14_MPL14)
Boxplot Errores Conjunto de Entrenamiento, Validación y Prueba :
errores_plots(y_train_vola14_14, train_preds_vola14_MLP14, y_val_vola14_14, val_preds_vola14_MLP14, y_test_vola14_14, test_preds_vola14_MLP14)
De acuerdo a los resultados obtenidos del gráfico de Run vs Error/Score y al compararlo con el gráfico boxplot de los residuales se observa lo siguiente:
Conjunto de Entrenamiento: La media de los residuales se encuentra cercana al error/score correspondiente al val_loss de la época del mejor modelo.
Conjunto de Validación: La media de los residuales se encuentra alejada del error/score correspondiente al val_loss de la época del mejor modelo.
Conjunto de Prueba: La media de los residuales se encuentra alejada del error/score correspondiente al val_loss de la época del mejor modelo.
Lo anterior es coherente con las métricas obtenidas para el modelo implementado. Sin embargo, a pesar de esto, se evidencia que existe autocorrelación en los residuales, lo que hace que nuestro modelo no sea confiable.
Horizonte de 21 días (\(\tau=21\))#
A continuación, se lleva a cabo la búsqueda de los hiperparámetros que optimizan nuestro modelo utilizando Keras y GridSearch.
# Suppress TensorFlow info/warning log messages.
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

def create_mlp_model(activation='tanh', learning_rate=0.001):
    """Build and compile an MLP for a 21-feature input window.

    Architecture: Input(21) -> Dense(32) -> Dense(16) -> Dense(16)
    -> Dropout(0.2) -> Dense(1, linear). Compiled with MAE loss and
    the legacy Adam optimizer at the given learning rate.
    """
    inputs = Input(shape=(21,), dtype='float32')
    hidden = Dense(32, activation=activation)(inputs)
    hidden = Dense(16, activation=activation)(hidden)
    hidden = Dense(16, activation=activation)(hidden)
    # Dropout layer for regularization.
    regularized = Dropout(0.2)(hidden)
    outputs = Dense(1, activation='linear')(regularized)
    mlp = Model(inputs=inputs, outputs=outputs)
    mlp.compile(loss='mean_absolute_error',
                optimizer=tf.keras.optimizers.legacy.Adam(learning_rate=learning_rate))
    return mlp
# Configure the grid search over the Keras regressor wrapper.
model = KerasRegressor(build_fn=create_mlp_model, epochs=20, batch_size=16, verbose=0)

# Hyperparameter grid (wider candidate sets kept as reference in comments).
param_grid = {
    'activation': ['tanh'],    # candidates tried: ['relu', 'tanh', 'sigmoid']
    'epochs': [100],           # candidates tried: [20, 50, 100, 200, 300]
    'learning_rate': [0.001],  # candidates tried: [0.001, 0.01, 0.1, 0.2, 0.3]
}

grid = GridSearchCV(estimator=model, param_grid=param_grid, n_jobs=-1,
                    cv=KFold(n_splits=5, shuffle=True), verbose=2)

# Fit every grid candidate with 5-fold cross-validation.
grid_result = grid.fit(X_train_vola14_21, y_train_vola14_21)

# Report the winning configuration.
best_params = grid_result.best_params_
print(f"Mejor función de activación: {best_params['activation']}")
print(f"Mejor número de epocas: {best_params['epochs']}")
print(f"Mejor Longitud de paso μ (Learning Rate): {best_params['learning_rate']}")
print(f"Mejor Puntuación: {grid_result.best_score_}")
Fitting 5 folds for each of 1 candidates, totalling 5 fits
[CV] END ...activation=tanh, epochs=100, learning_rate=0.001; total time= 19.3s
[CV] END ...activation=tanh, epochs=100, learning_rate=0.001; total time= 19.4s
[CV] END ...activation=tanh, epochs=100, learning_rate=0.001; total time= 19.6s
[CV] END ...activation=tanh, epochs=100, learning_rate=0.001; total time= 19.6s
[CV] END ...activation=tanh, epochs=100, learning_rate=0.001; total time= 19.7s
Mejor función de activación: tanh
Mejor número de epocas: 100
Mejor Longitud de paso μ (Learning Rate): 0.001
Mejor Puntuación: -0.005027637165039778
A continuación, configuramos la red MLP empleando la API funcional de Keras. Con este método, una capa puede ser designada como la entrada para la siguiente capa en el momento de su definición.
En este contexto, Input es una función utilizada para establecer una capa de entrada en un modelo de red neuronal. La propiedad shape=(21,) define la estructura de los datos de entrada, lo que indica que estos tendrán 21 dimensiones. Por otro lado, dtype='float32' especifica que los elementos de esta capa serán números de punto flotante de 32 bits.
A continuación se define la función build_models para la generación de modelos de acuerdo a los parámetros indexados a través de la lista.
Indexación de parámetros de la función build_models_mlp con base en lo indicado en el numeral 3 del parcial práctico.
# Hyperparameter candidates required by item 3 of the practical exam.
neurons_list = [10, 100, 1000, 10000]   # hidden-layer sizes to try
dropout_rates = [0.2, 0.4, 0.6, 0.8]    # dropout probabilities to try
input_shape21 = 21                      # 21-day input window
# Build one model per (neurons, dropout) combination with tanh activation.
models_MLP21_vola14 = build_models_mlp(input_shape21, neurons_list, dropout_rates, 'tanh')
Modelo con 10 neuronas y dropout 0.2:
Model: "model_443"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_444 (InputLayer) [(None, 21)] 0
dense_1160 (Dense) (None, 32) 704
dense_1161 (Dense) (None, 16) 528
dense_1162 (Dense) (None, 16) 272
dropout_443 (Dropout) (None, 16) 0
dense_1163 (Dense) (None, 1) 17
=================================================================
Total params: 1,521
Trainable params: 1,521
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10 neuronas y dropout 0.4:
Model: "model_444"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_445 (InputLayer) [(None, 21)] 0
dense_1164 (Dense) (None, 32) 704
dense_1165 (Dense) (None, 16) 528
dense_1166 (Dense) (None, 16) 272
dropout_444 (Dropout) (None, 16) 0
dense_1167 (Dense) (None, 1) 17
=================================================================
Total params: 1,521
Trainable params: 1,521
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10 neuronas y dropout 0.6:
Model: "model_445"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_446 (InputLayer) [(None, 21)] 0
dense_1168 (Dense) (None, 32) 704
dense_1169 (Dense) (None, 16) 528
dense_1170 (Dense) (None, 16) 272
dropout_445 (Dropout) (None, 16) 0
dense_1171 (Dense) (None, 1) 17
=================================================================
Total params: 1,521
Trainable params: 1,521
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10 neuronas y dropout 0.8:
Model: "model_446"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_447 (InputLayer) [(None, 21)] 0
dense_1172 (Dense) (None, 32) 704
dense_1173 (Dense) (None, 16) 528
dense_1174 (Dense) (None, 16) 272
dropout_446 (Dropout) (None, 16) 0
dense_1175 (Dense) (None, 1) 17
=================================================================
Total params: 1,521
Trainable params: 1,521
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 100 neuronas y dropout 0.2:
Model: "model_447"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_448 (InputLayer) [(None, 21)] 0
dense_1176 (Dense) (None, 32) 704
dense_1177 (Dense) (None, 16) 528
dense_1178 (Dense) (None, 16) 272
dropout_447 (Dropout) (None, 16) 0
dense_1179 (Dense) (None, 1) 17
=================================================================
Total params: 1,521
Trainable params: 1,521
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 100 neuronas y dropout 0.4:
Model: "model_448"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_449 (InputLayer) [(None, 21)] 0
dense_1180 (Dense) (None, 32) 704
dense_1181 (Dense) (None, 16) 528
dense_1182 (Dense) (None, 16) 272
dropout_448 (Dropout) (None, 16) 0
dense_1183 (Dense) (None, 1) 17
=================================================================
Total params: 1,521
Trainable params: 1,521
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 100 neuronas y dropout 0.6:
Model: "model_449"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_450 (InputLayer) [(None, 21)] 0
dense_1184 (Dense) (None, 32) 704
dense_1185 (Dense) (None, 16) 528
dense_1186 (Dense) (None, 16) 272
dropout_449 (Dropout) (None, 16) 0
dense_1187 (Dense) (None, 1) 17
=================================================================
Total params: 1,521
Trainable params: 1,521
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 100 neuronas y dropout 0.8:
Model: "model_450"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_451 (InputLayer) [(None, 21)] 0
dense_1188 (Dense) (None, 32) 704
dense_1189 (Dense) (None, 16) 528
dense_1190 (Dense) (None, 16) 272
dropout_450 (Dropout) (None, 16) 0
dense_1191 (Dense) (None, 1) 17
=================================================================
Total params: 1,521
Trainable params: 1,521
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 1000 neuronas y dropout 0.2:
Model: "model_451"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_452 (InputLayer) [(None, 21)] 0
dense_1192 (Dense) (None, 32) 704
dense_1193 (Dense) (None, 16) 528
dense_1194 (Dense) (None, 16) 272
dropout_451 (Dropout) (None, 16) 0
dense_1195 (Dense) (None, 1) 17
=================================================================
Total params: 1,521
Trainable params: 1,521
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 1000 neuronas y dropout 0.4:
Model: "model_452"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_453 (InputLayer) [(None, 21)] 0
dense_1196 (Dense) (None, 32) 704
dense_1197 (Dense) (None, 16) 528
dense_1198 (Dense) (None, 16) 272
dropout_452 (Dropout) (None, 16) 0
dense_1199 (Dense) (None, 1) 17
=================================================================
Total params: 1,521
Trainable params: 1,521
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 1000 neuronas y dropout 0.6:
Model: "model_453"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_454 (InputLayer) [(None, 21)] 0
dense_1200 (Dense) (None, 32) 704
dense_1201 (Dense) (None, 16) 528
dense_1202 (Dense) (None, 16) 272
dropout_453 (Dropout) (None, 16) 0
dense_1203 (Dense) (None, 1) 17
=================================================================
Total params: 1,521
Trainable params: 1,521
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 1000 neuronas y dropout 0.8:
Model: "model_454"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_455 (InputLayer) [(None, 21)] 0
dense_1204 (Dense) (None, 32) 704
dense_1205 (Dense) (None, 16) 528
dense_1206 (Dense) (None, 16) 272
dropout_454 (Dropout) (None, 16) 0
dense_1207 (Dense) (None, 1) 17
=================================================================
Total params: 1,521
Trainable params: 1,521
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10000 neuronas y dropout 0.2:
Model: "model_455"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_456 (InputLayer) [(None, 21)] 0
dense_1208 (Dense) (None, 32) 704
dense_1209 (Dense) (None, 16) 528
dense_1210 (Dense) (None, 16) 272
dropout_455 (Dropout) (None, 16) 0
dense_1211 (Dense) (None, 1) 17
=================================================================
Total params: 1,521
Trainable params: 1,521
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10000 neuronas y dropout 0.4:
Model: "model_456"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_457 (InputLayer) [(None, 21)] 0
dense_1212 (Dense) (None, 32) 704
dense_1213 (Dense) (None, 16) 528
dense_1214 (Dense) (None, 16) 272
dropout_456 (Dropout) (None, 16) 0
dense_1215 (Dense) (None, 1) 17
=================================================================
Total params: 1,521
Trainable params: 1,521
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10000 neuronas y dropout 0.6:
Model: "model_457"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_458 (InputLayer) [(None, 21)] 0
dense_1216 (Dense) (None, 32) 704
dense_1217 (Dense) (None, 16) 528
dense_1218 (Dense) (None, 16) 272
dropout_457 (Dropout) (None, 16) 0
dense_1219 (Dense) (None, 1) 17
=================================================================
Total params: 1,521
Trainable params: 1,521
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10000 neuronas y dropout 0.8:
Model: "model_458"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_459 (InputLayer) [(None, 21)] 0
dense_1220 (Dense) (None, 32) 704
dense_1221 (Dense) (None, 16) 528
dense_1222 (Dense) (None, 16) 272
dropout_458 (Dropout) (None, 16) 0
dense_1223 (Dense) (None, 1) 17
=================================================================
Total params: 1,521
Trainable params: 1,521
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Para entrenar el modelo, se utiliza la función fit() en el objeto del modelo, proporcionándole como argumentos X_train y y_train. El proceso de entrenamiento se lleva a cabo a lo largo de un número específico de épocas. Además, el parámetro batch_size especifica cuántas muestras del conjunto de entrenamiento se usarán en cada iteración de retropropagación (backpropagation).
El conjunto de datos de validación se utiliza para evaluar el rendimiento del modelo al finalizar cada época. Se emplea un objeto ModelCheckpoint que monitorea la función de pérdida en el conjunto de validación y almacena el modelo correspondiente a la época en la que esta función alcanza su valor más bajo.
Se define val_loss como el valor de la función de coste para los datos de validación cruzada y loss como el valor de la función de coste para los datos de entrenamiento. period=1 indica que el modelo se evaluará y potencialmente se guardará después de cada época.
from tensorflow.keras.callbacks import ModelCheckpoint

# Checkpoint filename template: the epoch number and the validation loss are
# embedded in the name so the best run can be recovered later from disk.
save_weights = os.path.join(
    'keras_models',
    'PRSA_data_vola14_MLP21_weights.{epoch:02d}-{val_loss:.4f}.keras',
)

# Evaluate once per epoch and keep on disk only the full model (not just the
# weights) with the lowest validation loss seen so far.
save_best21_vola14 = ModelCheckpoint(
    save_weights,
    monitor='val_loss',
    mode='min',
    save_best_only=True,
    save_weights_only=False,
    save_freq='epoch',
    verbose=2,
)
A continuación se itera sobre cada uno de los modelos del objeto models_MLP21.
import os
from joblib import dump, load

# Train (or reload from cache) every candidate MLP in models_MLP21_vola14.
# Each training history is persisted to disk so re-running the notebook skips
# fits that already completed.
history_vola14_MPL21 = []
for i, model in enumerate(models_MLP21_vola14):
    filename = f'history_vola14_MPL21_model_{i}.joblib'
    if os.path.exists(filename):
        # Cached history found: load it instead of retraining.
        model_history = load(filename)
        # Fixed: the message previously printed the literal text '(unknown)'
        # instead of interpolating the actual filename.
        print(f"El archivo '{filename}' ya existe. Se ha cargado el historial del entrenamiento.")
    else:
        model_history = model.fit(x=X_train_vola14_21, y=y_train_vola14_21, batch_size=16, epochs=100,
                                  verbose=2, callbacks=[save_best21_vola14], validation_data=(X_val_vola14_21, y_val_vola14_21),
                                  shuffle=True)
        # Persist only the history dict (the Keras History object is not picklable).
        dump(model_history.history, filename)
        print(f"El entrenamiento del modelo {i + 1} se ha completado y el historial ha sido guardado en '{filename}'.")
    # Normalize to a plain dict whether loaded from cache (dict) or freshly fit (History).
    history_vola14_MPL21.append(model_history if isinstance(model_history, dict) else model_history.history)
El archivo 'history_vola14_MPL21_model_0.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola14_MPL21_model_1.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola14_MPL21_model_2.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola14_MPL21_model_3.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola14_MPL21_model_4.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola14_MPL21_model_5.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola14_MPL21_model_6.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola14_MPL21_model_7.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola14_MPL21_model_8.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola14_MPL21_model_9.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola14_MPL21_model_10.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola14_MPL21_model_11.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola14_MPL21_model_12.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola14_MPL21_model_13.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola14_MPL21_model_14.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola14_MPL21_model_15.joblib' ya existe. Se ha cargado el historial del entrenamiento.
En este caso, el parámetro verbose=2 indica que se mostrará una línea por cada época durante el entrenamiento. Los valores posibles son: 0 para no mostrar información, 1 para visualizar una barra de progreso y 2 para ver un registro por época.
Además, las muestras se mezclan de manera aleatoria antes de cada época, gracias a la opción shuffle=True.
Una vez completado el entrenamiento, se realizan predicciones sobre la volatilidad del Precio del Bitcoin utilizando los mejores modelos guardados. Asimismo, se evalúa la calidad del ajuste mediante el uso de métricas de medición como SSE, MAPE, MAD, MSD y R2.
import os
import re
from tensorflow.keras.models import load_model
import numpy as np

# Checkpoints were written as ...weights.<epoch>-<val_loss>.keras; parse the
# epoch and validation loss back out of each filename and keep the checkpoint
# whose encoded val_loss is lowest.
model_dir = 'keras_models'
files = os.listdir(model_dir)
pattern = r"PRSA_data_vola14_MLP21_weights\.(\d+)-([\d\.]+)\.keras"

best_val_loss = float('inf')
best_model_file = None
best_model21_vola14 = None

for file in files:
    parsed = re.match(pattern, file)
    if not parsed:
        continue  # not one of this run's checkpoints
    epoch = int(parsed.group(1))       # epoch encoded in the name (informational)
    val_loss = float(parsed.group(2))  # validation loss encoded in the name
    if val_loss < best_val_loss:
        best_val_loss, best_model_file = val_loss, file

if best_model_file:
    best_model_path = os.path.join(model_dir, best_model_file)
    print(f"Cargando el mejor modelo: {best_model_file} con val_loss: {best_val_loss}")
    best_model21_vola14 = load_model(best_model_path)
    if best_model21_vola14 is None:
        print("Error: No se pudo cargar el mejor modelo.")
else:
    print("No se encontraron archivos de modelos que coincidan con el patrón.")
Cargando el mejor modelo: PRSA_data_vola14_MLP21_weights.78-0.0010.keras con val_loss: 0.001
A continuación estimamos las predicciones del modelo MLP para la época en donde la función de pérdida alcanza su valor más bajo.
# Predict on the train/validation/test splits with the checkpointed best model.
if best_model21_vola14 is None:
    print("No se puede realizar predicciones porque el mejor modelo no se ha cargado.")
else:
    # np.squeeze drops the trailing singleton dimension of the (n, 1) outputs.
    train_preds_vola14_MLP21 = np.squeeze(best_model21_vola14.predict(X_train_vola14_21))
    val_preds_vola14_MLP21 = np.squeeze(best_model21_vola14.predict(X_val_vola14_21))
    test_preds_vola14_MLP21 = np.squeeze(best_model21_vola14.predict(X_test_vola14_21))
    print('Predicciones de entrenamiento', train_preds_vola14_MLP21)
    print("Predicciones de validación:", val_preds_vola14_MLP21)
    print("Predicciones de prueba:", test_preds_vola14_MLP21)
154/154 [==============================] - 0s 183us/step
1/1 [==============================] - 0s 11ms/step
1/1 [==============================] - 0s 7ms/step
Predicciones de entrenamiento [0.00170384 0.00170384 0.00170384 ... 0.03128495 0.03137413 0.03155865]
Predicciones de validación: [0.02987536 0.02663189 0.02687607 0.02677922 0.02439515 0.02333529
0.02329596 0.02307745 0.02326437 0.02363529 0.02297621 0.02091753
0.02092136 0.02098885 0.02263275 0.01579397 0.01604423 0.01688812
0.01790908 0.01618985 0.01664273]
Predicciones de prueba: [0.01572575 0.01651403 0.01619201 0.01759873 0.01729779 0.0172427
0.01761704 0.01805063 0.01718391 0.0168886 0.01736357 0.01842539
0.01668458 0.01685067 0.01660944 0.01970051 0.02169567 0.03005343
0.03147137 0.03141434 0.03174061]
A continuación se indexan los parámetros sobre la función data_plot() para un horizonte de 21 días (\(\tau = 21\)).
# Split the Volatilidad_14 series into train/val/test segments for plotting (τ = 21).
data_train_plot_vola14_21, data_val_plot_vola14_21, data_test_plot_vola14_21 = data_plot(df_1_st['Volatilidad_14'], 21)
Datos de entrenamiento:
4998 0.0000
4997 0.0000
4996 0.0000
4995 0.0000
4994 0.0000
...
46 0.0170
45 0.0171
44 0.0161
43 0.0161
42 0.0158
Name: Volatilidad_14, Length: 4957, dtype: float6421
Datos de validación:
41 0.0162
40 0.0159
39 0.0169
38 0.0172
37 0.0171
36 0.0175
35 0.0163
34 0.0170
33 0.0170
32 0.0173
31 0.0180
30 0.0163
29 0.0164
28 0.0163
27 0.0195
26 0.0220
25 0.0307
24 0.0320
23 0.0320
22 0.0318
21 0.0318
Name: Volatilidad_14, dtype: float6421
Datos de prueba:
20 0.0358
19 0.0426
18 0.0424
17 0.0416
16 0.0406
15 0.0409
14 0.0408
13 0.0403
12 0.0403
11 0.0337
10 0.0338
9 0.0352
8 0.0395
7 0.0410
6 0.0351
5 0.0376
4 0.0445
3 0.0455
2 0.0456
1 0.0456
0 0.0478
Name: Volatilidad_14, dtype: float6421
# Plot the last 100 training points plus the validation/test series against the MLP predictions (τ = 21).
plot_model(data_train_plot_vola14_21[-100:], data_val_plot_vola14_21, data_test_plot_vola14_21, val_preds_vola14_MLP21, test_preds_vola14_MLP21, "Predicciones usando Perceptrón Multicapa (MLP) para un horizonte de 21 días")
A continuación realizamos un análisis sobre los residuales y rendimiento de nuestro modelo con relación a nuestro conjunto de validación(rendimiento) y test (rendimiento y residuales).
Conjunto de datos de Entrenamiento
A continuación se realiza el análisis de los residuales para el conjunto de entrenamiento.
# Residual diagnostics on the training split: Ljung-Box (autocorrelation) and Jarque-Bera (normality) p-values.
ljung_box_pval_MLP_train21_vola14, jarque_bera_pval_MLP_train21_vola14 = diagnostic_plots(y_train_vola14_21, train_preds_vola14_MLP21)
Ljung-Box LB Statistic: 1007.350768
Ljung-Box p-value: 0.000000
Se rechaza H0: hay autocorrelación en los residuales.
Jarque-Bera p-value: 0.000000
Se rechaza H0: los residuales no siguen una distribución normal.
# Compute the fit metrics (SSE, MAPE, MAD, MSD, R2) for the training split.
metrica_vola14_MLP_train21 = metricas(y_train_vola14_21,train_preds_vola14_MLP21)
# Relabel the single row with a descriptive name.
metrica_vola14_MLP_train21.index = metrica_vola14_MLP_train21.index.map({0: 'MLP Entrenamiento Volatilidad ω = 14 y τ = 21'})
# Append the residual-diagnostic p-values computed above as extra columns.
metrica_vola14_MLP_train21['Ljung-Box p-value'] = pd.Series([ljung_box_pval_MLP_train21_vola14], index=metrica_vola14_MLP_train21.index)
metrica_vola14_MLP_train21['Jarque-Bera p-value'] = pd.Series([jarque_bera_pval_MLP_train21_vola14], index=metrica_vola14_MLP_train21.index)
metrica_vola14_MLP_train21
| SSE | MAPE | MAD | MSD | R2 | Ljung-Box p-value | Jarque-Bera p-value | |
|---|---|---|---|---|---|---|---|
| MLP Entrenamiento Volatilidad ω = 14 y τ = 21 | 1.7507 | 8.31% | 0.0 | 0.0 | 91.0% | 4.5340e-221 | 0.0 |
Conjunto de datos de Prueba:
# Residual diagnostics on the test split: Ljung-Box (autocorrelation) and Jarque-Bera (normality) p-values.
ljung_box_pvalMLP21_vola14, jarque_bera_pvalMLP21_vola14 = evaluate_residuals(data_test_plot_vola14_21, test_preds_vola14_MLP21)
Se rechaza H0: hay autocorrelación en los residuales.
No se rechaza H0: los residuales siguen una distribución normal.
# Fit metrics for the test split, labeled and augmented with the diagnostic p-values.
metrica_MLP21_test_vola14 = metricas(y_test_vola14_21,test_preds_vola14_MLP21)
metrica_MLP21_test_vola14.index = metrica_MLP21_test_vola14.index.map({0: 'MLP Prueba Volatilidad ω = 14 y τ = 21'})
metrica_MLP21_test_vola14['Ljung-Box p-value'] = pd.Series([ljung_box_pvalMLP21_vola14], index=metrica_MLP21_test_vola14.index)
metrica_MLP21_test_vola14['Jarque-Bera p-value'] = pd.Series([jarque_bera_pvalMLP21_vola14], index=metrica_MLP21_test_vola14.index)
metrica_MLP21_test_vola14
| SSE | MAPE | MAD | MSD | R2 | Ljung-Box p-value | Jarque-Bera p-value | |
|---|---|---|---|---|---|---|---|
| MLP Prueba Volatilidad ω = 14 y τ = 21 | 0.0001 | 5.67% | 0.0 | 0.0 | 86.73% | 0.0036 | 0.3858 |
Curva Runs vs Error/Score :
# Plot the validation-loss curves from the stored training histories.
plot_best_model_validation_loss(history_vola14_MPL21)
Boxplot Errores Conjunto de Entrenamiento, Validación y Prueba :
# Boxplots of the prediction errors for the train, validation and test splits.
errores_plots(y_train_vola14_21, train_preds_vola14_MLP21, y_val_vola14_21, val_preds_vola14_MLP21, y_test_vola14_21, test_preds_vola14_MLP21)
De acuerdo a los resultados obtenidos del gráfico de Run vs Error/Score y al compararlo con el gráfico boxplot de los residuales se observa lo siguiente:
Conjunto de Entrenamiento: La media de los residuales se encuentra alejada del error/score correspondiente al val_loss de la época del mejor modelo.
Conjunto de Validación: La media de los residuales se encuentra alejada considerablemente del error/score correspondiente al val_loss de la época del mejor modelo.
Conjunto de Prueba: La media de los residuales se encuentra cercana al error/score correspondiente al val_loss de la época del mejor modelo. De igual forma se observa una alta variabilidad en los residuales.
Lo anterior es coherente con las métricas obtenidas para el modelo implementado.
Horizonte de 28 días (\(\tau=28\))#
A continuación, se lleva a cabo la búsqueda de los hiperparámetros que optimizan nuestro modelo utilizando Keras y GridSearch.
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # Esto ignorará todos los logs de info y warnings.
def create_mlp_model(activation='tanh', learning_rate=0.001, input_dim=28):
    """Build and compile the MLP regressor used for the volatility forecast.

    Architecture: input_dim -> Dense(32) -> Dense(16) -> Dense(16)
    -> Dropout(0.2) -> Dense(1, linear).

    Parameters
    ----------
    activation : str
        Activation function for the three hidden Dense layers.
    learning_rate : float
        Step size for the Adam optimizer.
    input_dim : int
        Number of input features (defaults to 28 for the 28-day horizon);
        exposed as a parameter so the same builder can serve other horizons.

    Returns
    -------
    A compiled tf.keras Model minimizing mean absolute error.
    """
    input_layer = Input(shape=(input_dim,), dtype='float32')  # input layer
    dense1 = Dense(32, activation=activation)(input_layer)
    dense2 = Dense(16, activation=activation)(dense1)
    dense3 = Dense(16, activation=activation)(dense2)
    dropout_layer = Dropout(0.2)(dense3)  # dropout for regularization
    output_layer = Dense(1, activation='linear')(dropout_layer)  # linear output for regression
    model = Model(inputs=input_layer, outputs=output_layer)
    # Legacy Adam is used deliberately, matching the rest of the notebook.
    model.compile(loss='mean_absolute_error',
                  optimizer=tf.keras.optimizers.legacy.Adam(learning_rate=learning_rate))
    return model
# Grid Search configuration: wrap the Keras builder in a sklearn-compatible regressor.
model = KerasRegressor(build_fn=create_mlp_model, epochs=20, batch_size=16, verbose=0)
param_grid = {
'activation': ['relu'], # activations to try, e.g. ['relu', 'tanh', 'sigmoid']
'epochs' : [50], # epochs to try, e.g. [20, 50, 100, 200, 300]
'learning_rate' : [0.001] # learning rates to try, e.g. [0.001, 0.01, 0.1, 0.2, 0.3]
}
# 5-fold shuffled cross-validation; n_jobs=-1 uses all available cores.
grid = GridSearchCV(estimator=model, param_grid=param_grid, n_jobs=-1, cv=KFold(n_splits=5, shuffle=True), verbose =2)
# Fit the grid on the 28-day-horizon training data.
grid_result = grid.fit(X_train_vola14_28, y_train_vola14_28)
# Report the best hyperparameters and score (less negative is better here).
print(f"Mejor función de activación: {grid_result.best_params_['activation']}")
print(f"Mejor número de epocas: {grid_result.best_params_['epochs']}")
print(f"Mejor Longitud de paso μ (Learning Rate): {grid_result.best_params_['learning_rate']}")
print(f"Mejor Puntuación: {grid_result.best_score_}")
Fitting 5 folds for each of 1 candidates, totalling 5 fits
[CV] END ....activation=relu, epochs=50, learning_rate=0.001; total time= 8.8s
[CV] END ....activation=relu, epochs=50, learning_rate=0.001; total time= 8.8s
[CV] END ....activation=relu, epochs=50, learning_rate=0.001; total time= 8.8s
[CV] END ....activation=relu, epochs=50, learning_rate=0.001; total time= 8.8s
[CV] END ....activation=relu, epochs=50, learning_rate=0.001; total time= 8.9s
Mejor función de activación: relu
Mejor número de epocas: 50
Mejor Longitud de paso μ (Learning Rate): 0.001
Mejor Puntuación: -0.004603199660778046
A continuación, configuramos la red MLP empleando la API funcional de Keras. Con este método, una capa puede ser designada como la entrada para la siguiente capa en el momento de su definición.
En este contexto, Input es una función utilizada para establecer una capa de entrada en un modelo de red neuronal. La propiedad shape=(28,) define la estructura de los datos de entrada, lo que indica que estos tendrán 28 dimensiones. Por otro lado, dtype='float32' especifica que los elementos de esta capa serán números de punto flotante de 32 bits.
A continuación se define la función build_models para la generación de modelos de acuerdo a los parámetros indexados a través de la lista.
La indexación de parámetros de la función build_models_mlp se realiza con base en lo indicado en el numeral 3 del parcial práctico.
# Build one model per (neurons, dropout) combination with relu activation.
input_shape28 = 28
neurons_list = [10, 100, 1000, 10000]
dropout_rates = [0.2, 0.4, 0.6, 0.8]
# NOTE(review): the printed summaries below show identical 32/16/16 layer widths
# and 1,745 params for every value in neurons_list — confirm build_models_mlp
# actually applies the neuron counts.
models_MLP28_vola14 = build_models_mlp(input_shape28, neurons_list, dropout_rates, 'relu')
Modelo con 10 neuronas y dropout 0.2:
Model: "model_460"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_461 (InputLayer) [(None, 28)] 0
dense_1228 (Dense) (None, 32) 928
dense_1229 (Dense) (None, 16) 528
dense_1230 (Dense) (None, 16) 272
dropout_460 (Dropout) (None, 16) 0
dense_1231 (Dense) (None, 1) 17
=================================================================
Total params: 1,745
Trainable params: 1,745
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10 neuronas y dropout 0.4:
Model: "model_461"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_462 (InputLayer) [(None, 28)] 0
dense_1232 (Dense) (None, 32) 928
dense_1233 (Dense) (None, 16) 528
dense_1234 (Dense) (None, 16) 272
dropout_461 (Dropout) (None, 16) 0
dense_1235 (Dense) (None, 1) 17
=================================================================
Total params: 1,745
Trainable params: 1,745
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10 neuronas y dropout 0.6:
Model: "model_462"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_463 (InputLayer) [(None, 28)] 0
dense_1236 (Dense) (None, 32) 928
dense_1237 (Dense) (None, 16) 528
dense_1238 (Dense) (None, 16) 272
dropout_462 (Dropout) (None, 16) 0
dense_1239 (Dense) (None, 1) 17
=================================================================
Total params: 1,745
Trainable params: 1,745
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10 neuronas y dropout 0.8:
Model: "model_463"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_464 (InputLayer) [(None, 28)] 0
dense_1240 (Dense) (None, 32) 928
dense_1241 (Dense) (None, 16) 528
dense_1242 (Dense) (None, 16) 272
dropout_463 (Dropout) (None, 16) 0
dense_1243 (Dense) (None, 1) 17
=================================================================
Total params: 1,745
Trainable params: 1,745
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 100 neuronas y dropout 0.2:
Model: "model_464"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_465 (InputLayer) [(None, 28)] 0
dense_1244 (Dense) (None, 32) 928
dense_1245 (Dense) (None, 16) 528
dense_1246 (Dense) (None, 16) 272
dropout_464 (Dropout) (None, 16) 0
dense_1247 (Dense) (None, 1) 17
=================================================================
Total params: 1,745
Trainable params: 1,745
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 100 neuronas y dropout 0.4:
Model: "model_465"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_466 (InputLayer) [(None, 28)] 0
dense_1248 (Dense) (None, 32) 928
dense_1249 (Dense) (None, 16) 528
dense_1250 (Dense) (None, 16) 272
dropout_465 (Dropout) (None, 16) 0
dense_1251 (Dense) (None, 1) 17
=================================================================
Total params: 1,745
Trainable params: 1,745
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 100 neuronas y dropout 0.6:
Model: "model_466"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_467 (InputLayer) [(None, 28)] 0
dense_1252 (Dense) (None, 32) 928
dense_1253 (Dense) (None, 16) 528
dense_1254 (Dense) (None, 16) 272
dropout_466 (Dropout) (None, 16) 0
dense_1255 (Dense) (None, 1) 17
=================================================================
Total params: 1,745
Trainable params: 1,745
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 100 neuronas y dropout 0.8:
Model: "model_467"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_468 (InputLayer) [(None, 28)] 0
dense_1256 (Dense) (None, 32) 928
dense_1257 (Dense) (None, 16) 528
dense_1258 (Dense) (None, 16) 272
dropout_467 (Dropout) (None, 16) 0
dense_1259 (Dense) (None, 1) 17
=================================================================
Total params: 1,745
Trainable params: 1,745
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 1000 neuronas y dropout 0.2:
Model: "model_468"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_469 (InputLayer) [(None, 28)] 0
dense_1260 (Dense) (None, 32) 928
dense_1261 (Dense) (None, 16) 528
dense_1262 (Dense) (None, 16) 272
dropout_468 (Dropout) (None, 16) 0
dense_1263 (Dense) (None, 1) 17
=================================================================
Total params: 1,745
Trainable params: 1,745
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 1000 neuronas y dropout 0.4:
Model: "model_469"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_470 (InputLayer) [(None, 28)] 0
dense_1264 (Dense) (None, 32) 928
dense_1265 (Dense) (None, 16) 528
dense_1266 (Dense) (None, 16) 272
dropout_469 (Dropout) (None, 16) 0
dense_1267 (Dense) (None, 1) 17
=================================================================
Total params: 1,745
Trainable params: 1,745
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 1000 neuronas y dropout 0.6:
Model: "model_470"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_471 (InputLayer) [(None, 28)] 0
dense_1268 (Dense) (None, 32) 928
dense_1269 (Dense) (None, 16) 528
dense_1270 (Dense) (None, 16) 272
dropout_470 (Dropout) (None, 16) 0
dense_1271 (Dense) (None, 1) 17
=================================================================
Total params: 1,745
Trainable params: 1,745
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 1000 neuronas y dropout 0.8:
Model: "model_471"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_472 (InputLayer) [(None, 28)] 0
dense_1272 (Dense) (None, 32) 928
dense_1273 (Dense) (None, 16) 528
dense_1274 (Dense) (None, 16) 272
dropout_471 (Dropout) (None, 16) 0
dense_1275 (Dense) (None, 1) 17
=================================================================
Total params: 1,745
Trainable params: 1,745
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10000 neuronas y dropout 0.2:
Model: "model_472"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_473 (InputLayer) [(None, 28)] 0
dense_1276 (Dense) (None, 32) 928
dense_1277 (Dense) (None, 16) 528
dense_1278 (Dense) (None, 16) 272
dropout_472 (Dropout) (None, 16) 0
dense_1279 (Dense) (None, 1) 17
=================================================================
Total params: 1,745
Trainable params: 1,745
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10000 neuronas y dropout 0.4:
Model: "model_473"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_474 (InputLayer) [(None, 28)] 0
dense_1280 (Dense) (None, 32) 928
dense_1281 (Dense) (None, 16) 528
dense_1282 (Dense) (None, 16) 272
dropout_473 (Dropout) (None, 16) 0
dense_1283 (Dense) (None, 1) 17
=================================================================
Total params: 1,745
Trainable params: 1,745
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10000 neuronas y dropout 0.6:
Model: "model_474"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_475 (InputLayer) [(None, 28)] 0
dense_1284 (Dense) (None, 32) 928
dense_1285 (Dense) (None, 16) 528
dense_1286 (Dense) (None, 16) 272
dropout_474 (Dropout) (None, 16) 0
dense_1287 (Dense) (None, 1) 17
=================================================================
Total params: 1,745
Trainable params: 1,745
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10000 neuronas y dropout 0.8:
Model: "model_475"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_476 (InputLayer) [(None, 28)] 0
dense_1288 (Dense) (None, 32) 928
dense_1289 (Dense) (None, 16) 528
dense_1290 (Dense) (None, 16) 272
dropout_475 (Dropout) (None, 16) 0
dense_1291 (Dense) (None, 1) 17
=================================================================
Total params: 1,745
Trainable params: 1,745
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Para entrenar el modelo, se utiliza la función fit() en el objeto del modelo, proporcionándole como argumentos X_train y y_train. El proceso de entrenamiento se lleva a cabo a lo largo de un número específico de épocas. Además, el parámetro batch_size especifica cuántas muestras del conjunto de entrenamiento se usarán en cada iteración de retropropagación (backpropagation).
El conjunto de datos de validación se utiliza para evaluar el rendimiento del modelo al finalizar cada época. Se emplea un objeto ModelCheckpoint que monitorea la función de pérdida en el conjunto de validación y almacena el modelo correspondiente a la época en la que esta función alcanza su valor más bajo.
Se define val_loss como el valor de la función de coste para los datos de validación cruzada y loss como el valor de la función de coste para los datos de entrenamiento. period=1 indica que el modelo se evaluará y potencialmente se guardará después de cada época.
# Checkpoint callback: after every epoch, keep only the model whose
# validation loss is the lowest seen so far.  The epoch number and the
# val_loss value are embedded in the filename so the best run can be
# located later by parsing the saved files.
from tensorflow.keras.callbacks import ModelCheckpoint

save_weights = os.path.join(
    'keras_models',
    'PRSA_data_vola14_MLP28_weights.{epoch:02d}-{val_loss:.4f}.keras',
)
save_best28_vola14 = ModelCheckpoint(
    save_weights,
    monitor='val_loss',
    verbose=2,
    save_best_only=True,
    save_weights_only=False,
    mode='min',
    save_freq='epoch',
)
A continuación se itera sobre cada uno de los modelos del objeto models_MLP28.
import os
from joblib import dump, load

# Train every MLP in models_MLP28_vola14, caching each training history on
# disk with joblib so re-running the notebook does not retrain from scratch.
history_vola14_MPL28 = []

for i, model in enumerate(models_MLP28_vola14):
    filename = f'history_vola14_MPL28_model_{i}.joblib'
    if os.path.exists(filename):
        # Cached run: load the stored history dict instead of retraining.
        model_history = load(filename)
        # Bug fix: the message previously printed a literal placeholder
        # instead of interpolating the actual filename.
        print(f"El archivo '{filename}' ya existe. Se ha cargado el historial del entrenamiento.")
    else:
        model_history = model.fit(
            x=X_train_vola14_28, y=y_train_vola14_28,
            batch_size=16, epochs=50,  # epochs chosen via the grid search
            verbose=2, callbacks=[save_best28_vola14],
            validation_data=(X_val_vola14_28, y_val_vola14_28),
            shuffle=True,
        )
        dump(model_history.history, filename)
        print(f"El entrenamiento del modelo {i + 1} se ha completado y el historial ha sido guardado en '{filename}'.")
    # Normalize: append a plain dict whether it came from cache (already a
    # dict) or from a fresh fit (a Keras History object).
    history_vola14_MPL28.append(model_history if isinstance(model_history, dict) else model_history.history)
El archivo 'history_vola14_MPL28_model_0.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola14_MPL28_model_1.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola14_MPL28_model_2.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola14_MPL28_model_3.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola14_MPL28_model_4.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola14_MPL28_model_5.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola14_MPL28_model_6.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola14_MPL28_model_7.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola14_MPL28_model_8.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola14_MPL28_model_9.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola14_MPL28_model_10.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola14_MPL28_model_11.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola14_MPL28_model_12.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola14_MPL28_model_13.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola14_MPL28_model_14.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola14_MPL28_model_15.joblib' ya existe. Se ha cargado el historial del entrenamiento.
En este caso, el parámetro verbose=2 indica que se mostrará una línea por cada época durante el entrenamiento. Los valores posibles son: 0 para no mostrar información, 1 para visualizar una barra de progreso y 2 para ver un registro por época.
Además, las muestras se mezclan de manera aleatoria antes de cada época, gracias a la opción shuffle=True.
Una vez completado el entrenamiento, se realizan predicciones sobre el Precio del Bitcoin utilizando los mejores modelos guardados. Asimismo, se evalúa la calidad del ajuste mediante el uso de métricas de medición como SSE, MAPE, MAD, MSD y R2.
import os
import re
from tensorflow.keras.models import load_model
import numpy as np

# Locate, among the files written by the ModelCheckpoint callback, the
# checkpoint with the lowest validation loss (epoch and val_loss are
# encoded in the filename), then load that model.
model_dir = 'keras_models'
files = os.listdir(model_dir)
pattern = r"PRSA_data_vola14_MLP28_weights\.(\d+)-([\d\.]+)\.keras"

best_val_loss = float('inf')
best_model_file = None
best_model28_vola14 = None

for file in files:
    match = re.match(pattern, file)
    if match:
        # group(1) is the epoch (unused here); group(2) is the val_loss.
        val_loss = float(match.group(2))
        if val_loss < best_val_loss:
            best_val_loss = val_loss
            best_model_file = file

if best_model_file:
    best_model_path = os.path.join(model_dir, best_model_file)
    print(f"Cargando el mejor modelo: {best_model_file} con val_loss: {best_val_loss}")
    best_model28_vola14 = load_model(best_model_path)
    if best_model28_vola14 is None:
        print("Error: No se pudo cargar el mejor modelo.")
else:
    print("No se encontraron archivos de modelos que coincidan con el patrón.")
Cargando el mejor modelo: PRSA_data_vola14_MLP28_weights.31-0.0011.keras con val_loss: 0.0011
A continuación estimamos las predicciones del modelo MLP para la época en donde la función de pérdida alcanza su valor más bajo.
# Generate predictions on the train / validation / test splits with the
# best checkpointed model, flattening the (n, 1) network outputs to 1-D.
if best_model28_vola14 is None:
    print("No se puede realizar predicciones porque el mejor modelo no se ha cargado.")
else:
    train_preds_vola14_MLP28 = np.squeeze(best_model28_vola14.predict(X_train_vola14_28))
    val_preds_vola14_MLP28 = np.squeeze(best_model28_vola14.predict(X_val_vola14_28))
    test_preds_vola14_MLP28 = np.squeeze(best_model28_vola14.predict(X_test_vola14_28))

    # Show the flattened prediction vectors for each split.
    print("Predicciones de entrenamiento:", train_preds_vola14_MLP28)
    print("Predicciones de validación:", val_preds_vola14_MLP28)
    print("Predicciones de prueba:", test_preds_vola14_MLP28)
153/153 [==============================] - 0s 190us/step
1/1 [==============================] - 0s 9ms/step
1/1 [==============================] - 0s 10ms/step
Predicciones de entrenamiento: [0.00596403 0.00596403 0.00596403 ... 0.02038304 0.02024306 0.01999932]
Predicciones de validación: [0.01989005 0.02129651 0.02071984 0.02234683 0.02395498 0.02456236
0.02435488 0.02554766 0.02928887 0.03004147 0.0294 0.02937974
0.03471796 0.03623903 0.03580884 0.03472243 0.03353471 0.03160591
0.0319986 0.03224634 0.03198493 0.03060053 0.02734821 0.02674336
0.02669819 0.02469058 0.02361169 0.02332568]
Predicciones de prueba: [0.02341563 0.02329951 0.0239257 0.0232718 0.02181141 0.02139834
0.02170686 0.02298849 0.0185006 0.01725134 0.01746814 0.01890099
0.01789921 0.01819181 0.01704365 0.01761634 0.01743313 0.01868746
0.01865299 0.01883747 0.0188517 0.01915296 0.01872328 0.01836095
0.01840936 0.01957742 0.01822976 0.01814106]
A continuación se indexan los parámetros sobre la función data_plot() para un horizonte de 28 días (\(\tau = 28\)).
# Slice the ω = 14 volatility series into train / validation / test plotting
# segments for the 28-day horizon.
data_train_plot_vola14_28, data_val_plot_vola14_28 , data_test_plot_vola14_28 = data_plot(df_1_st['Volatilidad_14'], 28)
Datos de entrenamiento:
4998 0.0000
4997 0.0000
4996 0.0000
4995 0.0000
4994 0.0000
...
60 0.0268
59 0.0268
58 0.0236
57 0.0237
56 0.0226
Name: Volatilidad_14, Length: 4943, dtype: float6428
Datos de validación:
55 0.0235
54 0.0233
53 0.0233
52 0.0213
51 0.0212
50 0.0213
49 0.0215
48 0.0158
47 0.0158
46 0.0170
45 0.0171
44 0.0161
43 0.0161
42 0.0158
41 0.0162
40 0.0159
39 0.0169
38 0.0172
37 0.0171
36 0.0175
35 0.0163
34 0.0170
33 0.0170
32 0.0173
31 0.0180
30 0.0163
29 0.0164
28 0.0163
Name: Volatilidad_14, dtype: float6428
Datos de prueba:
27 0.0195
26 0.0220
25 0.0307
24 0.0320
23 0.0320
22 0.0318
21 0.0318
20 0.0358
19 0.0426
18 0.0424
17 0.0416
16 0.0406
15 0.0409
14 0.0408
13 0.0403
12 0.0403
11 0.0337
10 0.0338
9 0.0352
8 0.0395
7 0.0410
6 0.0351
5 0.0376
4 0.0445
3 0.0455
2 0.0456
1 0.0456
0 0.0478
Name: Volatilidad_14, dtype: float6428
# Plot the last 100 training observations plus the validation/test actuals
# against the MLP predictions for the 28-day horizon.
plot_model(data_train_plot_vola14_28[-100:], data_val_plot_vola14_28, data_test_plot_vola14_28, val_preds_vola14_MLP28, test_preds_vola14_MLP28, "Predicciones usando Perceptrón Multicapa (MLP) para un horizonte de 28 días")
A continuación realizamos un análisis sobre los residuales y el rendimiento de nuestro modelo con relación a nuestro conjunto de validación (rendimiento) y de test (rendimiento y residuales).
Conjunto de datos de Entrenamiento
A continuación se realiza el análisis de los residuales para el conjunto de entrenamiento.
# Residual diagnostics on the TRAINING fit: Ljung-Box (autocorrelation) and
# Jarque-Bera (normality) p-values.
ljung_box_pval_MLP_train28_vola14, jarque_bera_pval_MLP_train28_vola14 = diagnostic_plots(y_train_vola14_28, train_preds_vola14_MLP28)
Ljung-Box LB Statistic: 70.143822
Ljung-Box p-value: 0.000000
Se rechaza H0: hay autocorrelación en los residuales.
Jarque-Bera p-value: 0.000000
Se rechaza H0: los residuales no siguen una distribución normal.
# Compute the fit metrics (SSE, MAPE, MAD, MSD, R2) on the training set and
# attach the residual-diagnostic p-values as extra columns.
metrica_vola14_MLP_train28 = metricas(y_train_vola14_28,train_preds_vola14_MLP28)
# Relabel the single row (index 0) with a descriptive model name.
metrica_vola14_MLP_train28.index = metrica_vola14_MLP_train28.index.map({0: 'MLP Entrenamiento Volatilidad ω = 14 y τ = 28'})
metrica_vola14_MLP_train28['Ljung-Box p-value'] = pd.Series([ljung_box_pval_MLP_train28_vola14], index=metrica_vola14_MLP_train28.index)
metrica_vola14_MLP_train28['Jarque-Bera p-value'] = pd.Series([jarque_bera_pval_MLP_train28_vola14], index=metrica_vola14_MLP_train28.index)
metrica_vola14_MLP_train28
| SSE | MAPE | MAD | MSD | R2 | Ljung-Box p-value | Jarque-Bera p-value | |
|---|---|---|---|---|---|---|---|
| MLP Entrenamiento Volatilidad ω = 14 y τ = 28 | 1.1029 | 10.11% | 0.0 | 0.0 | 94.33% | 5.5134e-17 | 0.0 |
Conjunto de datos de Prueba:
# Residual diagnostics on the TEST-set predictions (Ljung-Box and
# Jarque-Bera p-values).
ljung_box_pvalMLP28_vola14, jarque_bera_pvalMLP28_vola14 = evaluate_residuals(data_test_plot_vola14_28, test_preds_vola14_MLP28)
Se rechaza H0: hay autocorrelación en los residuales.
No se rechaza H0: los residuales siguen una distribución normal.
# Compute the fit metrics on the TEST set for the ω = 14, τ = 28 MLP model
# and attach the residual-diagnostic p-values as extra columns.
# Bug fix: the original passed test_preds_vola7_MLP28 (the ω = 7 predictions,
# a copy-paste slip) instead of the ω = 14 test predictions computed above.
metrica_MLP28_test_vola14 = metricas(y_test_vola14_28, test_preds_vola14_MLP28)
# Relabel the single row (index 0) with a descriptive model name.
metrica_MLP28_test_vola14.index = metrica_MLP28_test_vola14.index.map({0: 'MLP Prueba Volatilidad ω = 14 y τ = 28'})
metrica_MLP28_test_vola14['Ljung-Box p-value'] = pd.Series([ljung_box_pvalMLP28_vola14], index=metrica_MLP28_test_vola14.index)
metrica_MLP28_test_vola14['Jarque-Bera p-value'] = pd.Series([jarque_bera_pvalMLP28_vola14], index=metrica_MLP28_test_vola14.index)
metrica_MLP28_test_vola14
| SSE | MAPE | MAD | MSD | R2 | Ljung-Box p-value | Jarque-Bera p-value | |
|---|---|---|---|---|---|---|---|
| MLP Prueba Volatilidad ω = 14 y τ = 28 | 0.0004 | 18.21% | 0.0 | 0.0 | -135.94% | 3.1530e-05 | 0.1321 |
Curva Runs vs Error/Score :
# Plot the validation-loss curve across epochs for the best model's history.
plot_best_model_validation_loss(history_vola14_MPL28)
Boxplot Errores Conjunto de Entrenamiento, Validación y Prueba :
# Boxplots of the residuals for the train / validation / test splits.
errores_plots(y_train_vola14_28, train_preds_vola14_MLP28, y_val_vola14_28, val_preds_vola14_MLP28, y_test_vola14_28, test_preds_vola14_MLP28)
De acuerdo a los resultados obtenidos del gráfico de Run vs Error/Score y al compararlo con el gráfico boxplot de los residuales se observa lo siguiente:
Conjunto de Entrenamiento: La media de los residuales se encuentra muy cercana al error/score correspondiente al val_loss de la época del mejor modelo.
Conjunto de Validación: La media de los residuales se encuentra alejada del error/score correspondiente al val_loss de la época del mejor modelo.
Conjunto de Prueba: La media de los residuales se encuentra alejada del error/score correspondiente al val_loss de la época del mejor modelo. De igual forma se observa una alta variabilidad en los residuales.
Lo anterior es coherente con las métricas obtenidas para el modelo implementado.
Volatilidad ω = 14 (Volatilidad_14): Memoria a Corto y Largo Plazo (LSTM) #
Ya definimos los regresores (X) y la variable objetivo (y) para el proceso de entrenamiento y validación en la sección correspondiente al modelo Perceptrones Multicapa a través de la función create_time_series_datasets(); sin embargo, ésta se utiliza para generar arreglos 2D de forma (número de muestras, número de pasos de tiempo). Dado que la entrada a las capas de una RNN debe ser de forma: número de muestras, número de pasos de tiempo, número de características por paso de tiempo; procedemos con la definición de la función change_dimension_lstm() para realizar la transformación de 2D a 3D.
Horizonte de 7 días (\(\tau=7\))#
Indexación de parámetros para cambio de dimensión de X de 2D a 3D.
# Reshape the 2-D (samples, timesteps) arrays into the 3-D
# (samples, timesteps, features) form required by LSTM input layers.
X_train_vola14_lstm_7, X_val_vola14_lstm_7, X_test_vola14_lstm_7 = change_dimension_lstm(X_train_vola14_7, X_val_vola14_7, X_test_vola14_7)
Shape of 3D arrays X: (4971, 7, 1) (7, 7, 1) (7, 7, 1)
A continuación, se lleva a cabo la búsqueda de los hiperparámetros que optimizan nuestro modelo utilizando Keras y GridSearch.
from sklearn.metrics import make_scorer, mean_absolute_error

os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'  # silence TensorFlow info/warning logs


def create_lstm_model(optimizer, activation):
    """Build and compile the two-layer LSTM regressor (7 timesteps, 1 feature).

    NOTE(review): the ``activation`` argument is accepted for the grid search
    but never applied inside the network — the output layer is hard-coded to
    'linear'. Confirm whether it was meant to be wired into a layer.
    """
    inp = Input(shape=(7, 1), dtype='float32')
    seq = LSTM(64, return_sequences=True)(inp)       # emit the full sequence to the next LSTM
    encoded = LSTM(32, return_sequences=False)(seq)  # keep only the last timestep's output
    regularized = Dropout(0.2)(encoded)
    out = Dense(1, activation='linear')(regularized)
    net = Model(inputs=inp, outputs=out)
    net.compile(loss='mean_absolute_error', optimizer=optimizer)
    return net
# Hyperparameter grid (reduced for runtime; the commented values show the
# full search space).
param_grid = {
    'activation': ['relu'],  # full grid: ['relu', 'tanh', 'sigmoid']
    'epochs': [20],          # full grid: [20, 50, 100, 150]
    'optimizer': ['SGD'],    # full grid: ['SGD', 'RMSprop', 'Adam']
}

# 5-fold grid search scored by mean absolute error, run in parallel.
scoring = make_scorer(mean_absolute_error)
model = KerasRegressor(build_fn=create_lstm_model, epochs=20, batch_size=16, verbose=0)
grid = GridSearchCV(
    estimator=model,
    param_grid=param_grid,
    cv=KFold(n_splits=5, shuffle=True),
    scoring=scoring,
    n_jobs=-1,
    verbose=2,
)
grid_result = grid.fit(X_train_vola14_lstm_7, y_train_vola14_7)

# Report the winning hyperparameters and score.
print(f"Mejor función de activación: {grid_result.best_params_['activation']}")
print(f"Mejor número de epocas: {grid_result.best_params_['epochs']}")
print(f"Mejor Longitud de paso μ (optimizer): {grid_result.best_params_['optimizer']}")
print(f"Mejor Puntuación: {grid_result.best_score_}")
Fitting 5 folds for each of 1 candidates, totalling 5 fits
[CV] END ..........activation=relu, epochs=20, optimizer=SGD; total time= 21.0s
[CV] END ..........activation=relu, epochs=20, optimizer=SGD; total time= 21.4s
[CV] END ..........activation=relu, epochs=20, optimizer=SGD; total time= 21.6s
[CV] END ..........activation=relu, epochs=20, optimizer=SGD; total time= 21.6s
[CV] END ..........activation=relu, epochs=20, optimizer=SGD; total time= 21.7s
Mejor función de activación: relu
Mejor número de epocas: 20
Mejor Longitud de paso μ (optimizer): SGD
Mejor Puntuación: 0.016871558798797655
El modelo de predicción de series temporales se basa en una red neuronal recurrente que incluye una capa de entrada que se conecta a una capa LSTM. Esta capa LSTM considera 7 pasos temporales, equivalentes al número de datos históricos. La salida de la LSTM se genera únicamente a partir del último paso temporal. Cada uno de los n pasos temporales definidos en el shape (n,) cuenta con 32 neuronas ocultas.
La cantidad de pasos temporales (timesteps) y 1 indica el número de características (features) por cada paso temporal. Se utiliza return_sequences=True cuando es necesario enviar la secuencia completa de salidas a la capa siguiente, que puede ser otra capa recurrente, para este caso otra capa LSTM.
La salida de LSTM pasa a una capa de exclusión que elimina aleatoriamente el 20%, 40%, 60% y 80% de la entrada antes de pasar a la capa de salida, que tiene una única neurona oculta con una función de activación lineal
La indexación de parámetros de la función build_models_lstm se realiza con base en lo indicado en el numeral 3 del parcial práctico.
# Hyperparameter combinations required by the exercise (numeral 3):
# neuron counts and dropout rates to combine, with the optimizer fixed to
# the grid-search winner ('SGD').
input_shape7 = 7  # timesteps per sample (7-day horizon)
neurons_list = [10, 100, 1000, 10000]
dropout_rates = [0.2, 0.4, 0.6, 0.8]
models_LSTM7_vola14 = build_models_lstm(input_shape7, neurons_list, dropout_rates ,'SGD')
Model: "model_477"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_478 (InputLayer) [(None, 7, 1)] 0
lstm_410 (LSTM) (None, 7, 64) 16896
lstm_411 (LSTM) (None, 32) 12416
dropout_477 (Dropout) (None, 32) 0
dense_1293 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10 neuronas y dropout 0.2:
Model: "model_477"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_478 (InputLayer) [(None, 7, 1)] 0
lstm_410 (LSTM) (None, 7, 64) 16896
lstm_411 (LSTM) (None, 32) 12416
dropout_477 (Dropout) (None, 32) 0
dense_1293 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_478"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_479 (InputLayer) [(None, 7, 1)] 0
lstm_412 (LSTM) (None, 7, 64) 16896
lstm_413 (LSTM) (None, 32) 12416
dropout_478 (Dropout) (None, 32) 0
dense_1294 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10 neuronas y dropout 0.4:
Model: "model_478"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_479 (InputLayer) [(None, 7, 1)] 0
lstm_412 (LSTM) (None, 7, 64) 16896
lstm_413 (LSTM) (None, 32) 12416
dropout_478 (Dropout) (None, 32) 0
dense_1294 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_479"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_480 (InputLayer) [(None, 7, 1)] 0
lstm_414 (LSTM) (None, 7, 64) 16896
lstm_415 (LSTM) (None, 32) 12416
dropout_479 (Dropout) (None, 32) 0
dense_1295 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10 neuronas y dropout 0.6:
Model: "model_479"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_480 (InputLayer) [(None, 7, 1)] 0
lstm_414 (LSTM) (None, 7, 64) 16896
lstm_415 (LSTM) (None, 32) 12416
dropout_479 (Dropout) (None, 32) 0
dense_1295 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_480"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_481 (InputLayer) [(None, 7, 1)] 0
lstm_416 (LSTM) (None, 7, 64) 16896
lstm_417 (LSTM) (None, 32) 12416
dropout_480 (Dropout) (None, 32) 0
dense_1296 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10 neuronas y dropout 0.8:
Model: "model_480"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_481 (InputLayer) [(None, 7, 1)] 0
lstm_416 (LSTM) (None, 7, 64) 16896
lstm_417 (LSTM) (None, 32) 12416
dropout_480 (Dropout) (None, 32) 0
dense_1296 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_481"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_482 (InputLayer) [(None, 7, 1)] 0
lstm_418 (LSTM) (None, 7, 64) 16896
lstm_419 (LSTM) (None, 32) 12416
dropout_481 (Dropout) (None, 32) 0
dense_1297 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 100 neuronas y dropout 0.2:
Model: "model_481"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_482 (InputLayer) [(None, 7, 1)] 0
lstm_418 (LSTM) (None, 7, 64) 16896
lstm_419 (LSTM) (None, 32) 12416
dropout_481 (Dropout) (None, 32) 0
dense_1297 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_482"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_483 (InputLayer) [(None, 7, 1)] 0
lstm_420 (LSTM) (None, 7, 64) 16896
lstm_421 (LSTM) (None, 32) 12416
dropout_482 (Dropout) (None, 32) 0
dense_1298 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 100 neuronas y dropout 0.4:
Model: "model_482"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_483 (InputLayer) [(None, 7, 1)] 0
lstm_420 (LSTM) (None, 7, 64) 16896
lstm_421 (LSTM) (None, 32) 12416
dropout_482 (Dropout) (None, 32) 0
dense_1298 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_483"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_484 (InputLayer) [(None, 7, 1)] 0
lstm_422 (LSTM) (None, 7, 64) 16896
lstm_423 (LSTM) (None, 32) 12416
dropout_483 (Dropout) (None, 32) 0
dense_1299 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 100 neuronas y dropout 0.6:
Model: "model_483"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_484 (InputLayer) [(None, 7, 1)] 0
lstm_422 (LSTM) (None, 7, 64) 16896
lstm_423 (LSTM) (None, 32) 12416
dropout_483 (Dropout) (None, 32) 0
dense_1299 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_484"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_485 (InputLayer) [(None, 7, 1)] 0
lstm_424 (LSTM) (None, 7, 64) 16896
lstm_425 (LSTM) (None, 32) 12416
dropout_484 (Dropout) (None, 32) 0
dense_1300 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 100 neuronas y dropout 0.8:
Model: "model_484"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_485 (InputLayer) [(None, 7, 1)] 0
lstm_424 (LSTM) (None, 7, 64) 16896
lstm_425 (LSTM) (None, 32) 12416
dropout_484 (Dropout) (None, 32) 0
dense_1300 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_485"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_486 (InputLayer) [(None, 7, 1)] 0
lstm_426 (LSTM) (None, 7, 64) 16896
lstm_427 (LSTM) (None, 32) 12416
dropout_485 (Dropout) (None, 32) 0
dense_1301 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 1000 neuronas y dropout 0.2:
Model: "model_485"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_486 (InputLayer) [(None, 7, 1)] 0
lstm_426 (LSTM) (None, 7, 64) 16896
lstm_427 (LSTM) (None, 32) 12416
dropout_485 (Dropout) (None, 32) 0
dense_1301 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_486"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_487 (InputLayer) [(None, 7, 1)] 0
lstm_428 (LSTM) (None, 7, 64) 16896
lstm_429 (LSTM) (None, 32) 12416
dropout_486 (Dropout) (None, 32) 0
dense_1302 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 1000 neuronas y dropout 0.4:
Model: "model_486"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_487 (InputLayer) [(None, 7, 1)] 0
lstm_428 (LSTM) (None, 7, 64) 16896
lstm_429 (LSTM) (None, 32) 12416
dropout_486 (Dropout) (None, 32) 0
dense_1302 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_487"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_488 (InputLayer) [(None, 7, 1)] 0
lstm_430 (LSTM) (None, 7, 64) 16896
lstm_431 (LSTM) (None, 32) 12416
dropout_487 (Dropout) (None, 32) 0
dense_1303 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 1000 neuronas y dropout 0.6:
Model: "model_487"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_488 (InputLayer) [(None, 7, 1)] 0
lstm_430 (LSTM) (None, 7, 64) 16896
lstm_431 (LSTM) (None, 32) 12416
dropout_487 (Dropout) (None, 32) 0
dense_1303 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_488"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_489 (InputLayer) [(None, 7, 1)] 0
lstm_432 (LSTM) (None, 7, 64) 16896
lstm_433 (LSTM) (None, 32) 12416
dropout_488 (Dropout) (None, 32) 0
dense_1304 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 1000 neuronas y dropout 0.8:
Model: "model_488"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_489 (InputLayer) [(None, 7, 1)] 0
lstm_432 (LSTM) (None, 7, 64) 16896
lstm_433 (LSTM) (None, 32) 12416
dropout_488 (Dropout) (None, 32) 0
dense_1304 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_489"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_490 (InputLayer) [(None, 7, 1)] 0
lstm_434 (LSTM) (None, 7, 64) 16896
lstm_435 (LSTM) (None, 32) 12416
dropout_489 (Dropout) (None, 32) 0
dense_1305 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10000 neuronas y dropout 0.2:
Model: "model_489"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_490 (InputLayer) [(None, 7, 1)] 0
lstm_434 (LSTM) (None, 7, 64) 16896
lstm_435 (LSTM) (None, 32) 12416
dropout_489 (Dropout) (None, 32) 0
dense_1305 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_490"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_491 (InputLayer) [(None, 7, 1)] 0
lstm_436 (LSTM) (None, 7, 64) 16896
lstm_437 (LSTM) (None, 32) 12416
dropout_490 (Dropout) (None, 32) 0
dense_1306 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10000 neuronas y dropout 0.4:
Model: "model_490"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_491 (InputLayer) [(None, 7, 1)] 0
lstm_436 (LSTM) (None, 7, 64) 16896
lstm_437 (LSTM) (None, 32) 12416
dropout_490 (Dropout) (None, 32) 0
dense_1306 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_491"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_492 (InputLayer) [(None, 7, 1)] 0
lstm_438 (LSTM) (None, 7, 64) 16896
lstm_439 (LSTM) (None, 32) 12416
dropout_491 (Dropout) (None, 32) 0
dense_1307 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10000 neuronas y dropout 0.6:
Model: "model_491"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_492 (InputLayer) [(None, 7, 1)] 0
lstm_438 (LSTM) (None, 7, 64) 16896
lstm_439 (LSTM) (None, 32) 12416
dropout_491 (Dropout) (None, 32) 0
dense_1307 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_492"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_493 (InputLayer) [(None, 7, 1)] 0
lstm_440 (LSTM) (None, 7, 64) 16896
lstm_441 (LSTM) (None, 32) 12416
dropout_492 (Dropout) (None, 32) 0
dense_1308 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10000 neuronas y dropout 0.8:
Model: "model_492"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_493 (InputLayer) [(None, 7, 1)] 0
lstm_440 (LSTM) (None, 7, 64) 16896
lstm_441 (LSTM) (None, 32) 12416
dropout_492 (Dropout) (None, 32) 0
dense_1308 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Para entrenar el modelo, se utiliza la función fit() en el objeto del modelo, proporcionándole como argumentos X_train y y_train. El proceso de entrenamiento se lleva a cabo a lo largo de un número específico de épocas. Además, el parámetro batch_size especifica cuántas muestras del conjunto de entrenamiento se usarán en cada iteración de retropropagación (backpropagation).
El conjunto de datos de validación se utiliza para evaluar el rendimiento del modelo al finalizar cada época. Se emplea un objeto ModelCheckpoint que monitorea la función de pérdida en el conjunto de validación y almacena el modelo correspondiente a la época en la que esta función alcanza su valor más bajo.
Se define val_loss como el valor de la función de coste para los datos de validación cruzada y loss como el valor de la función de coste para los datos de entrenamiento. period=1 indica que el modelo se evaluará y potencialmente se guardará después de cada época.
from tensorflow.keras.callbacks import ModelCheckpoint

# Checkpoint file name embeds the epoch number and the validation loss so the
# best run can be identified later just from the directory listing.
save_weights = os.path.join('keras_models', 'PRSA_data_vola14_LSTM_weights.{epoch:02d}-{val_loss:.4f}.keras')

# Evaluate once per epoch and keep only the full model (not just weights)
# with the lowest validation loss seen so far.
save_best7_lstm_vola14 = ModelCheckpoint(
    save_weights,
    monitor='val_loss',
    verbose=2,
    save_best_only=True,
    save_weights_only=False,
    mode='min',
    save_freq='epoch',
)
A continuación se itera sobre cada uno de los modelos del objeto models_LSTM7.
import os
from joblib import dump, load

# Train (or reload from cache) each LSTM model in models_LSTM7_vola14.
# The per-model training history is persisted to disk so reruns of this
# cell skip models that were already trained.
history_vola14_LSTM7 = []
for i, model in enumerate(models_LSTM7_vola14):
    filename = f'history_vola14_LSTM_model_{i}.joblib'
    if os.path.exists(filename):
        # Cached history exists: load it instead of retraining.
        model_history = load(filename)
        # BUGFIX: the message previously printed the literal text '(unknown)'
        # instead of interpolating the actual file name.
        print(f"El archivo '{filename}' ya existe. Se ha cargado el historial del entrenamiento.")
    else:
        model_history = model.fit(
            x=X_train_vola14_lstm_7, y=y_train_vola14_7, batch_size=16, epochs=20,
            verbose=2, callbacks=[save_best7_lstm_vola14],
            validation_data=(X_val_vola14_lstm_7, y_val_vola14_7),
            shuffle=True)
        # Persist only the history dict; the model itself is saved by the
        # ModelCheckpoint callback.
        dump(model_history.history, filename)
        # BUGFIX: same placeholder issue as above — interpolate the file name.
        print(f"El entrenamiento del modelo {i + 1} se ha completado y el historial ha sido guardado en '{filename}'.")
    # Normalize: cached entries are plain dicts, fresh fits are History objects.
    history_vola14_LSTM7.append(model_history if isinstance(model_history, dict) else model_history.history)
El archivo 'history_vola14_LSTM_model_0.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola14_LSTM_model_1.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola14_LSTM_model_2.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola14_LSTM_model_3.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola14_LSTM_model_4.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola14_LSTM_model_5.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola14_LSTM_model_6.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola14_LSTM_model_7.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola14_LSTM_model_8.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola14_LSTM_model_9.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola14_LSTM_model_10.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola14_LSTM_model_11.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola14_LSTM_model_12.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola14_LSTM_model_13.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola14_LSTM_model_14.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola14_LSTM_model_15.joblib' ya existe. Se ha cargado el historial del entrenamiento.
En este caso, el parámetro verbose=2 indica que se mostrará una línea por cada época durante el entrenamiento. Los valores posibles son: 0 para no mostrar información, 1 para visualizar una barra de progreso y 2 para ver un registro por época.
Además, las muestras se mezclan de manera aleatoria antes de cada época, gracias a la opción shuffle=True.
Una vez completado el entrenamiento, se realizan predicciones sobre el Precio del Bitcoin utilizando los mejores modelos guardados. Las predicciones generadas, que corresponden al Precio del Bitcoin escalado, son transformadas inversamente para recuperar los valores originales. Asimismo, se evalúa la calidad del ajuste mediante métricas como SSE, MAPE, MAD, MSD y R2.
import os
import re
from tensorflow.keras.models import load_model
import numpy as np

# Scan the checkpoint directory and load the saved model whose file name
# encodes the smallest validation loss.
model_dir = 'keras_models'
files = os.listdir(model_dir)
pattern = r"PRSA_data_vola14_LSTM_weights\.(\d+)-([\d\.]+)\.keras"

best_val_loss = float('inf')
best_model_file = None
best7_lstm_vola14 = None

for file in files:
    match = re.match(pattern, file)
    if not match:
        continue  # skip files that are not checkpoints of this run
    epoch = int(match.group(1))       # epoch number embedded in the name
    val_loss = float(match.group(2))  # validation loss embedded in the name
    if val_loss < best_val_loss:
        best_val_loss, best_model_file = val_loss, file

if best_model_file:
    best_model_path = os.path.join(model_dir, best_model_file)
    print(f"Cargando el mejor modelo: {best_model_file} con val_loss: {best_val_loss}")
    best7_lstm_vola14 = load_model(best_model_path)
    if best7_lstm_vola14 is None:
        print("Error: No se pudo cargar el mejor modelo.")
else:
    print("No se encontraron archivos de modelos que coincidan con el patrón.")
Cargando el mejor modelo: PRSA_data_vola14_LSTM_weights.16-0.0017.keras con val_loss: 0.0017
A continuación estimamos las predicciones del modelo LSTM para la época en donde la función de pérdida alcanza su valor más bajo.
# Generate predictions with the best checkpointed model on the train,
# validation and test splits, flattening each result to 1-D.
if best7_lstm_vola14 is None:
    print("No se puede realizar predicciones porque el mejor modelo no se ha cargado.")
else:
    # predict() returns shape (n, 1); squeeze drops the trailing axis.
    train_preds_vola14_LSTM7 = np.squeeze(best7_lstm_vola14.predict(X_train_vola14_lstm_7))
    val_preds_vola14_LSTM7 = np.squeeze(best7_lstm_vola14.predict(X_val_vola14_lstm_7))
    test_preds_vola14_LSTM7 = np.squeeze(best7_lstm_vola14.predict(X_test_vola14_lstm_7))

    print("Predicciones de Entrenamiento:", train_preds_vola14_LSTM7)
    print("Predicciones de validación:", val_preds_vola14_LSTM7)
    print("Predicciones de prueba:", test_preds_vola14_LSTM7)
156/156 [==============================] - 0s 811us/step
1/1 [==============================] - 0s 8ms/step
1/1 [==============================] - 0s 9ms/step
Predicciones de Entrenamiento: [0.00203688 0.00203688 0.00203688 ... 0.03236723 0.03250549 0.03231812]
Predicciones de validación: [0.03228017 0.0362203 0.04266954 0.042154 0.04160269 0.04062048
0.0409362 ]
Predicciones de prueba: [0.04069978 0.04028278 0.04028784 0.03367012 0.0342448 0.03542686
0.03968877]
# Plot the last 100 training points plus validation/test predictions.
# NOTE(review): this call uses the *_vola7_* series and predictions although
# this section computes *_vola14_* results (e.g. test_preds_vola14_LSTM7
# above) — looks like a copy-paste slip; confirm the intended variables.
plot_model(data_train_plot_vola7[-100:], data_val_plot_vola7, data_test_plot_vola7, val_preds_vola7_LSTM7, test_preds_vola7_LSTM7, "Predicciones usando Memoria a Corto y Largo Paso (LSTM) para un horizonte de 7 días")
A continuación realizamos un análisis sobre los residuales y el rendimiento de nuestro modelo con relación a nuestro conjunto de Entrenamiento (rendimiento) y de Prueba (rendimiento y residuales).
Conjunto de datos de Entrenamiento
A continuación se realiza el análisis de los residuales para el conjunto de entrenamiento.
# Residual diagnostics on the training split: returns the Ljung-Box
# (autocorrelation) and Jarque-Bera (normality) p-values used in the
# metrics table below.
ljung_box_pval_LSTM_train7_vola14, jarque_bera_pval_LSTM_train7_vola14 = diagnostic_plots(y_train_vola14_7, train_preds_vola14_LSTM7)
Ljung-Box LB Statistic: 14.957703
Ljung-Box p-value: 0.000110
Se rechaza H0: hay autocorrelación en los residuales.
Jarque-Bera p-value: 0.000000
Se rechaza H0: los residuales no siguen una distribución normal.
# Compute fit metrics (SSE, MAPE, MAD, MSD, R2) for the training split and
# append the residual-diagnostic p-values as extra columns.
metrica_vola14_LSTM_train = metricas(y_train_vola14_7,train_preds_vola14_LSTM7)
# Relabel the single row (index 0) with a descriptive name.
metrica_vola14_LSTM_train.index = metrica_vola14_LSTM_train.index.map({0: 'LSTM Entrenamiento Volatilidad ω = 14 y τ = 7'})
metrica_vola14_LSTM_train['Ljung-Box p-value'] = pd.Series([ljung_box_pval_LSTM_train7_vola14], index=metrica_vola14_LSTM_train.index)
metrica_vola14_LSTM_train['Jarque-Bera p-value'] = pd.Series([jarque_bera_pval_LSTM_train7_vola14], index=metrica_vola14_LSTM_train.index)
metrica_vola14_LSTM_train
| SSE | MAPE | MAD | MSD | R2 | Ljung-Box p-value | Jarque-Bera p-value | |
|---|---|---|---|---|---|---|---|
| LSTM Entrenamiento Volatilidad ω = 14 y τ = 7 | 1.4121 | 8.87% | 0.0 | 0.0 | 92.76% | 0.0001 | 0.0 |
Conjunto de datos de Prueba:
# Residual diagnostics on the test split (Ljung-Box independence and
# Jarque-Bera normality p-values).
ljung_box_pvalLSTM7_vola14, jarque_bera_pvalLSTM7_vola14 = evaluate_residuals(data_test_plot_vola14_7, test_preds_vola14_LSTM7)
No se rechaza H0: los residuales son independientes (no correlacionados).
No se rechaza H0: los residuales siguen una distribución normal.
# Fit metrics for the test split, extended with the residual p-values
# (same structure as the training-split table above).
metrica_LSTM_test_vola14 = metricas(y_test_vola14_7,test_preds_vola14_LSTM7)
# Relabel the single row (index 0) with a descriptive name.
metrica_LSTM_test_vola14.index = metrica_LSTM_test_vola14.index.map({0: 'LSTM Prueba Volatilidad ω = 14 y τ = 7'})
metrica_LSTM_test_vola14['Ljung-Box p-value'] = pd.Series([ljung_box_pvalLSTM7_vola14], index=metrica_LSTM_test_vola14.index)
metrica_LSTM_test_vola14['Jarque-Bera p-value'] = pd.Series([jarque_bera_pvalLSTM7_vola14], index=metrica_LSTM_test_vola14.index)
metrica_LSTM_test_vola14
| SSE | MAPE | MAD | MSD | R2 | Ljung-Box p-value | Jarque-Bera p-value | |
|---|---|---|---|---|---|---|---|
| LSTM Prueba Volatilidad ω = 14 y τ = 7 | 6.3947e-05 | 5.38% | 0.0 | 0.0 | 3.1% | 0.0604 | 0.6416 |
Curva Runs vs Error/Score :
# Plot the validation-loss curves of the collected training histories.
plot_best_model_validation_loss(history_vola14_LSTM7)
Boxplot Errores Conjunto de Entrenamiento, Validación y Prueba :
# Boxplots of the prediction errors for the train / validation / test splits.
errores_plots(y_train_vola14_7, train_preds_vola14_LSTM7, y_val_vola14_7, val_preds_vola14_LSTM7, y_test_vola14_7, test_preds_vola14_LSTM7)
De acuerdo a los resultados obtenidos del gráfico de Run vs Error/Score y al compararlo con el gráfico boxplot de los residuales se observa lo siguiente:
Conjunto de Entrenamiento: La media de los residuales se encuentra alejada del error/score correspondiente al val_loss de la época del mejor modelo.
Conjunto de Validación: La media de los residuales se encuentra cercana al error/score correspondiente al val_loss de la época del mejor modelo.
Conjunto de Prueba: La media de los residuales se encuentra alejada del error/score correspondiente al val_loss de la época del mejor modelo.
Lo anterior es coherente con las métricas obtenidas para el modelo implementado.
Horizonte de 14 días (\(\tau=14\))#
Indexación de parámetros para cambio de dimensión de X de 2D a 3D.
# Reshape the 2-D feature matrices (samples, timesteps) into the 3-D
# (samples, timesteps, features=1) layout that Keras LSTM layers expect
# (the printed shapes below confirm the trailing feature axis).
X_train_vola14_lstm_14, X_val_vola14_lstm_14, X_test_vola14_lstm_14 = change_dimension_lstm(X_train_vola14_14, X_val_vola14_14, X_test_vola14_14)
Shape of 3D arrays X: (4943, 14, 1) (14, 14, 1) (14, 14, 1)
A continuación, se lleva a cabo la búsqueda de los hiperparámetros que optimizan nuestro modelo utilizando Keras y GridSearch.
from sklearn.metrics import make_scorer, mean_absolute_error
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # Silence TensorFlow info/warning logs.
# Build the LSTM network used by the grid search: 14 timesteps x 1 feature,
# two stacked LSTM layers (64 -> 32 units), 20% dropout, one linear output.
def create_lstm_model(optimizer, activation):
    # NOTE(review): the `activation` argument is never used — the output
    # layer hard-codes 'linear' and the LSTM layers keep their defaults, so
    # the grid search over 'activation' below has no effect on the model.
    input_layer_lstm = Input(shape=(14,1), dtype='float32')
    lstm_layer1= LSTM(64, return_sequences=True)(input_layer_lstm)
    # return_sequences=False: only the last timestep's output feeds forward.
    lstm_layer2 = LSTM(32, return_sequences=False)(lstm_layer1)
    dropout_layer_lstm = Dropout(0.2)(lstm_layer2)
    output_layer_lstm = Dense(1, activation='linear')(dropout_layer_lstm)
    ts_model_lstm = Model(inputs=input_layer_lstm, outputs=output_layer_lstm)
    ts_model_lstm.compile(loss='mean_absolute_error', optimizer=optimizer)
    return ts_model_lstm
# Hyper-parameter grid (the commented values show the full search space,
# reduced here to keep the runtime manageable).
param_grid = {'activation': ['relu'],  # full space: ['relu', 'tanh', 'sigmoid']
              'epochs': [20],          # full space: [20, 50, 100, 150]
              'optimizer': ['SGD']     # full space: ['SGD', 'RMSprop', 'Adam']
              }

# Grid-search configuration.
# BUGFIX: make_scorer defaults to greater_is_better=True, which would make
# GridSearchCV *maximize* the mean absolute error. MAE is a loss, so it must
# be minimized; with greater_is_better=False the scorer returns negated MAE
# and the search selects the lowest-error candidate.
scoring = make_scorer(mean_absolute_error, greater_is_better=False)
model = KerasRegressor(build_fn=create_lstm_model, epochs=10, batch_size=16, verbose=0)
grid = GridSearchCV(estimator=model, param_grid=param_grid, cv=KFold(n_splits=5, shuffle=True), scoring=scoring, n_jobs=-1, verbose=2)
grid_result = grid.fit(X_train_vola14_lstm_14, y_train_vola14_14)

# Report the winning hyper-parameters (best_score_ is now a negated MAE,
# so values closer to zero are better).
print(f"Mejor función de activación: {grid_result.best_params_['activation']}")
print(f"Mejor número de epocas: {grid_result.best_params_['epochs']}")
print(f"Mejor Longitud de paso μ (optimizer): {grid_result.best_params_['optimizer']}")
print(f"Mejor Puntuación: {grid_result.best_score_}")
Fitting 5 folds for each of 1 candidates, totalling 5 fits
[CV] END ..........activation=relu, epochs=20, optimizer=SGD; total time= 28.4s
[CV] END ..........activation=relu, epochs=20, optimizer=SGD; total time= 28.7s
[CV] END ..........activation=relu, epochs=20, optimizer=SGD; total time= 28.7s
[CV] END ..........activation=relu, epochs=20, optimizer=SGD; total time= 28.8s
[CV] END ..........activation=relu, epochs=20, optimizer=SGD; total time= 29.3s
Mejor función de activación: relu
Mejor número de epocas: 20
Mejor Longitud de paso μ (optimizer): SGD
Mejor Puntuación: 0.018342773872898954
El modelo de predicción de series temporales se basa en una red neuronal recurrente que incluye una capa de entrada que se conecta a una capa LSTM. Esta capa LSTM considera 14 pasos temporales, equivalentes al número de datos históricos. La salida de la LSTM se genera únicamente a partir del último paso temporal. Cada uno de los n pasos temporales definidos en el shape (n,) cuenta con 32 neuronas ocultas.
La cantidad de pasos temporales (timesteps) y 1 indica el número de características (features) por cada paso temporal. Se utiliza return_sequences=True cuando es necesario enviar la secuencia completa de salidas a la capa siguiente, que puede ser otra capa recurrente, para este caso otra capa LSTM.
La salida de LSTM pasa a una capa de exclusión que elimina aleatoriamente el 20%, 40%, 60% y 80% de la entrada antes de pasar a la capa de salida, que tiene una única neurona oculta con una función de activación lineal
La indexación de parámetros de la función build_models_lstm se realiza con base en lo indicado en el numeral 3 del parcial práctico.
# Build the grid of candidate architectures for the 14-day horizon: one
# model per (neurons, dropout) combination, trained with SGD.
input_shape14 = 14
neurons_list = [10, 100, 1000, 10000]
dropout_rates = [0.2, 0.4, 0.6, 0.8]
# NOTE(review): every printed summary below reports 29,345 parameters
# regardless of the neuron count, which suggests build_models_lstm ignores
# neurons_list — confirm against its definition.
models_LSTM14_vola14 = build_models_lstm(input_shape14, neurons_list, dropout_rates, 'SGD')
Model: "model_494"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_495 (InputLayer) [(None, 14, 1)] 0
lstm_444 (LSTM) (None, 14, 64) 16896
lstm_445 (LSTM) (None, 32) 12416
dropout_494 (Dropout) (None, 32) 0
dense_1310 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10 neuronas y dropout 0.2:
Model: "model_494"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_495 (InputLayer) [(None, 14, 1)] 0
lstm_444 (LSTM) (None, 14, 64) 16896
lstm_445 (LSTM) (None, 32) 12416
dropout_494 (Dropout) (None, 32) 0
dense_1310 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_495"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_496 (InputLayer) [(None, 14, 1)] 0
lstm_446 (LSTM) (None, 14, 64) 16896
lstm_447 (LSTM) (None, 32) 12416
dropout_495 (Dropout) (None, 32) 0
dense_1311 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10 neuronas y dropout 0.4:
Model: "model_495"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_496 (InputLayer) [(None, 14, 1)] 0
lstm_446 (LSTM) (None, 14, 64) 16896
lstm_447 (LSTM) (None, 32) 12416
dropout_495 (Dropout) (None, 32) 0
dense_1311 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_496"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_497 (InputLayer) [(None, 14, 1)] 0
lstm_448 (LSTM) (None, 14, 64) 16896
lstm_449 (LSTM) (None, 32) 12416
dropout_496 (Dropout) (None, 32) 0
dense_1312 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10 neuronas y dropout 0.6:
Model: "model_496"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_497 (InputLayer) [(None, 14, 1)] 0
lstm_448 (LSTM) (None, 14, 64) 16896
lstm_449 (LSTM) (None, 32) 12416
dropout_496 (Dropout) (None, 32) 0
dense_1312 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_497"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_498 (InputLayer) [(None, 14, 1)] 0
lstm_450 (LSTM) (None, 14, 64) 16896
lstm_451 (LSTM) (None, 32) 12416
dropout_497 (Dropout) (None, 32) 0
dense_1313 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10 neuronas y dropout 0.8:
Model: "model_497"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_498 (InputLayer) [(None, 14, 1)] 0
lstm_450 (LSTM) (None, 14, 64) 16896
lstm_451 (LSTM) (None, 32) 12416
dropout_497 (Dropout) (None, 32) 0
dense_1313 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_498"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_499 (InputLayer) [(None, 14, 1)] 0
lstm_452 (LSTM) (None, 14, 64) 16896
lstm_453 (LSTM) (None, 32) 12416
dropout_498 (Dropout) (None, 32) 0
dense_1314 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 100 neuronas y dropout 0.2:
Model: "model_498"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_499 (InputLayer) [(None, 14, 1)] 0
lstm_452 (LSTM) (None, 14, 64) 16896
lstm_453 (LSTM) (None, 32) 12416
dropout_498 (Dropout) (None, 32) 0
dense_1314 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_499"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_500 (InputLayer) [(None, 14, 1)] 0
lstm_454 (LSTM) (None, 14, 64) 16896
lstm_455 (LSTM) (None, 32) 12416
dropout_499 (Dropout) (None, 32) 0
dense_1315 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 100 neuronas y dropout 0.4:
Model: "model_499"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_500 (InputLayer) [(None, 14, 1)] 0
lstm_454 (LSTM) (None, 14, 64) 16896
lstm_455 (LSTM) (None, 32) 12416
dropout_499 (Dropout) (None, 32) 0
dense_1315 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_500"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_501 (InputLayer) [(None, 14, 1)] 0
lstm_456 (LSTM) (None, 14, 64) 16896
lstm_457 (LSTM) (None, 32) 12416
dropout_500 (Dropout) (None, 32) 0
dense_1316 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 100 neuronas y dropout 0.6:
Model: "model_500"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_501 (InputLayer) [(None, 14, 1)] 0
lstm_456 (LSTM) (None, 14, 64) 16896
lstm_457 (LSTM) (None, 32) 12416
dropout_500 (Dropout) (None, 32) 0
dense_1316 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_501"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_502 (InputLayer) [(None, 14, 1)] 0
lstm_458 (LSTM) (None, 14, 64) 16896
lstm_459 (LSTM) (None, 32) 12416
dropout_501 (Dropout) (None, 32) 0
dense_1317 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 100 neuronas y dropout 0.8:
Model: "model_501"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_502 (InputLayer) [(None, 14, 1)] 0
lstm_458 (LSTM) (None, 14, 64) 16896
lstm_459 (LSTM) (None, 32) 12416
dropout_501 (Dropout) (None, 32) 0
dense_1317 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_502"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_503 (InputLayer) [(None, 14, 1)] 0
lstm_460 (LSTM) (None, 14, 64) 16896
lstm_461 (LSTM) (None, 32) 12416
dropout_502 (Dropout) (None, 32) 0
dense_1318 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 1000 neuronas y dropout 0.2:
Model: "model_502"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_503 (InputLayer) [(None, 14, 1)] 0
lstm_460 (LSTM) (None, 14, 64) 16896
lstm_461 (LSTM) (None, 32) 12416
dropout_502 (Dropout) (None, 32) 0
dense_1318 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_503"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_504 (InputLayer) [(None, 14, 1)] 0
lstm_462 (LSTM) (None, 14, 64) 16896
lstm_463 (LSTM) (None, 32) 12416
dropout_503 (Dropout) (None, 32) 0
dense_1319 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 1000 neuronas y dropout 0.4:
Model: "model_503"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_504 (InputLayer) [(None, 14, 1)] 0
lstm_462 (LSTM) (None, 14, 64) 16896
lstm_463 (LSTM) (None, 32) 12416
dropout_503 (Dropout) (None, 32) 0
dense_1319 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_504"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_505 (InputLayer) [(None, 14, 1)] 0
lstm_464 (LSTM) (None, 14, 64) 16896
lstm_465 (LSTM) (None, 32) 12416
dropout_504 (Dropout) (None, 32) 0
dense_1320 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 1000 neuronas y dropout 0.6:
Model: "model_504"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_505 (InputLayer) [(None, 14, 1)] 0
lstm_464 (LSTM) (None, 14, 64) 16896
lstm_465 (LSTM) (None, 32) 12416
dropout_504 (Dropout) (None, 32) 0
dense_1320 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_505"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_506 (InputLayer) [(None, 14, 1)] 0
lstm_466 (LSTM) (None, 14, 64) 16896
lstm_467 (LSTM) (None, 32) 12416
dropout_505 (Dropout) (None, 32) 0
dense_1321 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 1000 neuronas y dropout 0.8:
Model: "model_505"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_506 (InputLayer) [(None, 14, 1)] 0
lstm_466 (LSTM) (None, 14, 64) 16896
lstm_467 (LSTM) (None, 32) 12416
dropout_505 (Dropout) (None, 32) 0
dense_1321 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_506"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_507 (InputLayer) [(None, 14, 1)] 0
lstm_468 (LSTM) (None, 14, 64) 16896
lstm_469 (LSTM) (None, 32) 12416
dropout_506 (Dropout) (None, 32) 0
dense_1322 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10000 neuronas y dropout 0.2:
Model: "model_506"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_507 (InputLayer) [(None, 14, 1)] 0
lstm_468 (LSTM) (None, 14, 64) 16896
lstm_469 (LSTM) (None, 32) 12416
dropout_506 (Dropout) (None, 32) 0
dense_1322 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_507"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_508 (InputLayer) [(None, 14, 1)] 0
lstm_470 (LSTM) (None, 14, 64) 16896
lstm_471 (LSTM) (None, 32) 12416
dropout_507 (Dropout) (None, 32) 0
dense_1323 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10000 neuronas y dropout 0.4:
Model: "model_507"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_508 (InputLayer) [(None, 14, 1)] 0
lstm_470 (LSTM) (None, 14, 64) 16896
lstm_471 (LSTM) (None, 32) 12416
dropout_507 (Dropout) (None, 32) 0
dense_1323 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_508"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_509 (InputLayer) [(None, 14, 1)] 0
lstm_472 (LSTM) (None, 14, 64) 16896
lstm_473 (LSTM) (None, 32) 12416
dropout_508 (Dropout) (None, 32) 0
dense_1324 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10000 neuronas y dropout 0.6:
Model: "model_508"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_509 (InputLayer) [(None, 14, 1)] 0
lstm_472 (LSTM) (None, 14, 64) 16896
lstm_473 (LSTM) (None, 32) 12416
dropout_508 (Dropout) (None, 32) 0
dense_1324 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_509"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_510 (InputLayer) [(None, 14, 1)] 0
lstm_474 (LSTM) (None, 14, 64) 16896
lstm_475 (LSTM) (None, 32) 12416
dropout_509 (Dropout) (None, 32) 0
dense_1325 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10000 neuronas y dropout 0.8:
Model: "model_509"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_510 (InputLayer) [(None, 14, 1)] 0
lstm_474 (LSTM) (None, 14, 64) 16896
lstm_475 (LSTM) (None, 32) 12416
dropout_509 (Dropout) (None, 32) 0
dense_1325 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Para entrenar el modelo, se utiliza la función fit() en el objeto del modelo, proporcionándole como argumentos X_train y y_train. El proceso de entrenamiento se lleva a cabo a lo largo de un número específico de épocas. Además, el parámetro batch_size especifica cuántas muestras del conjunto de entrenamiento se usarán en cada iteración de retropropagación (backpropagation).
El conjunto de datos de validación se utiliza para evaluar el rendimiento del modelo al finalizar cada época. Se emplea un objeto ModelCheckpoint que monitorea la función de pérdida en el conjunto de validación y almacena el modelo correspondiente a la época en la que esta función alcanza su valor más bajo.
Se define val_loss como el valor de la función de coste para los datos de validación cruzada y loss como el valor de la función de coste para los datos de entrenamiento. period=1 indica que el modelo se evaluará y potencialmente se guardará después de cada época.
from tensorflow.keras.callbacks import ModelCheckpoint

# Checkpoint file template: Keras substitutes the epoch number and the
# validation loss reached at the end of each epoch into the name.
save_weights = os.path.join('keras_models', 'PRSA_data_vola14_LSTM14_weights.{epoch:02d}-{val_loss:.4f}.keras')

# Keep only the full model (not just weights) from the epoch with the
# lowest validation loss, evaluated once per epoch.
save_best14_lstm_vola14 = ModelCheckpoint(
    save_weights,
    monitor='val_loss',
    mode='min',
    save_best_only=True,
    save_weights_only=False,
    save_freq='epoch',
    verbose=2,
)
A continuación se itera sobre cada uno de los modelos del objeto models_LSTM14_vola14.
import os
from joblib import dump, load

# Training histories for every LSTM configuration (one entry per model).
history_vola14_LSTM14 = []
# Iterate over each model in models_LSTM14_vola14: reuse a cached history
# when one exists on disk, otherwise train the model and cache its history.
for i, model in enumerate(models_LSTM14_vola14):
    filename = f'history_vola14_LSTM14_model_{i}.joblib'
    if os.path.exists(filename):
        # Training was already done in a previous run; load its history.
        model_history = load(filename)
        # Fix: interpolate the actual filename (the f-string previously
        # printed a literal placeholder instead of the file name).
        print(f"El archivo '{filename}' ya existe. Se ha cargado el historial del entrenamiento.")
    else:
        model_history = model.fit(x=X_train_vola14_lstm_14, y=y_train_vola14_14, batch_size=16, epochs=20,
                                  verbose=2, callbacks=[save_best14_lstm_vola14], validation_data=(X_val_vola14_lstm_14, y_val_vola14_14),
                                  shuffle=True)
        # Persist only the history dict, not the whole History object.
        dump(model_history.history, filename)
        print(f"El entrenamiento del modelo {i + 1} se ha completado y el historial ha sido guardado en '{filename}'.")
    # Cached loads are already dicts; fresh fits are History objects.
    history_vola14_LSTM14.append(model_history if isinstance(model_history, dict) else model_history.history)
El archivo 'history_vola14_LSTM14_model_0.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola14_LSTM14_model_1.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola14_LSTM14_model_2.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola14_LSTM14_model_3.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola14_LSTM14_model_4.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola14_LSTM14_model_5.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola14_LSTM14_model_6.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola14_LSTM14_model_7.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola14_LSTM14_model_8.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola14_LSTM14_model_9.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola14_LSTM14_model_10.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola14_LSTM14_model_11.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola14_LSTM14_model_12.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola14_LSTM14_model_13.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola14_LSTM14_model_14.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola14_LSTM14_model_15.joblib' ya existe. Se ha cargado el historial del entrenamiento.
En este caso, el parámetro verbose=2 indica que se mostrará una línea por cada época durante el entrenamiento. Los valores posibles son: 0 para no mostrar información, 1 para visualizar una barra de progreso y 2 para ver un registro por época.
Además, las muestras se mezclan de manera aleatoria antes de cada época, gracias a la opción shuffle=True.
Una vez completado el entrenamiento, se realizan predicciones sobre el Precio del Bitcoin utilizando los mejores modelos guardados. Las predicciones generadas, que corresponden al Precio del Bitcoin escalado, son transformadas inversamente para recuperar los valores originales. Asimismo, se evalúa la calidad del ajuste mediante el uso de métricas de medición como SSE, MAPE, MAD, MSD y R2.
import os
import re
from tensorflow.keras.models import load_model
import numpy as np

# Directory holding the checkpoints written during training.
model_dir = 'keras_models'
files = os.listdir(model_dir)
# Checkpoint names encode the epoch and the validation loss reached:
# PRSA_data_vola14_LSTM14_weights.<epoch>-<val_loss>.keras
pattern = r"PRSA_data_vola14_LSTM14_weights\.(\d+)-([\d\.]+)\.keras"
best_val_loss = float('inf')
best_model_file = None
best14_lstm_vola14 = None
# Scan every checkpoint and keep the one with the lowest validation loss.
# (The epoch number in group 1 is not needed for the selection, so it is
# no longer extracted.)
for file in files:
    match = re.match(pattern, file)
    if match:
        val_loss = float(match.group(2))  # validation loss encoded in the name
        if val_loss < best_val_loss:
            best_val_loss = val_loss
            best_model_file = file  # remember the best checkpoint so far
# Load the best model, if any checkpoint matched the pattern.
if best_model_file:
    best_model_path = os.path.join(model_dir, best_model_file)
    print(f"Cargando el mejor modelo: {best_model_file} con val_loss: {best_val_loss}")
    best14_lstm_vola14 = load_model(best_model_path)
    if best14_lstm_vola14 is None:
        print("Error: No se pudo cargar el mejor modelo.")
else:
    print("No se encontraron archivos de modelos que coincidan con el patrón.")
Cargando el mejor modelo: PRSA_data_vola14_LSTM14_weights.08-0.0005.keras con val_loss: 0.0005
A continuación estimamos las predicciones del modelo LSTM para la época en donde la función de pérdida alcanza su valor más bajo.
# Predictions using the best saved model.
# Fix: guard on best14_lstm_vola14 (the model loaded in this section),
# not best14_lstm_vola7 from the previous horizon.
if best14_lstm_vola14 is not None:
    train_preds_vola14_LSTM14 = best14_lstm_vola14.predict(X_train_vola14_lstm_14)
    val_preds_vola14_LSTM14 = best14_lstm_vola14.predict(X_val_vola14_lstm_14)
    test_preds_vola14_LSTM14 = best14_lstm_vola14.predict(X_test_vola14_lstm_14)
    # Drop the trailing singleton dimension: (n, 1) -> (n,)
    train_preds_vola14_LSTM14 = np.squeeze(train_preds_vola14_LSTM14)
    val_preds_vola14_LSTM14 = np.squeeze(val_preds_vola14_LSTM14)
    test_preds_vola14_LSTM14 = np.squeeze(test_preds_vola14_LSTM14)
    # Print the predictions
    print("Predicciones de Entrenamiento:", train_preds_vola14_LSTM14)
    print("Predicciones de validación:", val_preds_vola14_LSTM14)
    print("Predicciones de prueba:", test_preds_vola14_LSTM14)
else:
    print("No se puede realizar predicciones porque el mejor modelo no se ha cargado.")
155/155 [==============================] - 1s 1ms/step
1/1 [==============================] - 0s 8ms/step
1/1 [==============================] - 0s 9ms/step
Predicciones de Entrenamiento: [0.00191182 0.00191182 0.00191182 ... 0.01740972 0.01652261 0.01648168]
Predicciones de validación: [0.01622669 0.016594 0.01639079 0.01718482 0.01751529 0.01746545
0.01784063 0.016834 0.01730842 0.01736791 0.01764694 0.01822048
0.01682736 0.01679847]
Predicciones de prueba: [0.01677203 0.01942332 0.02176849 0.02935366 0.03108606 0.03110084
0.03096829 0.03095849 0.03435418 0.04038827 0.04064307 0.03991327
0.03902464 0.03921417]
# Plot the last 100 training points plus the validation and test series,
# with the LSTM validation/test predictions overlaid.
plot_model(data_train_plot_vola14_14[-100:], data_val_plot_vola14_14, data_test_plot_vola14_14, val_preds_vola14_LSTM14, test_preds_vola14_LSTM14, "Predicciones usando Memoria a Corto y Largo Paso (LSTM)")
A continuación realizamos un análisis sobre los residuales y el rendimiento de nuestro modelo con relación a nuestro conjunto de Entrenamiento (rendimiento) y de Prueba (rendimiento y residuales).
Conjunto de datos de Entrenamiento
A continuación se realiza el análisis de los residuales para el conjunto de entrenamiento.
# Residual diagnostics on the training set: returns the Ljung-Box
# (autocorrelation) and Jarque-Bera (normality) p-values and draws the plots.
ljung_box_pval_LSTM_train14_vola14, jarque_bera_pval_LSTM_train14_vola14 = diagnostic_plots(y_train_vola14_14, train_preds_vola14_LSTM14)
Ljung-Box LB Statistic: 48.574844
Ljung-Box p-value: 0.000000
Se rechaza H0: hay autocorrelación en los residuales.
Jarque-Bera p-value: 0.000000
Se rechaza H0: los residuales no siguen una distribución normal.
# Compute the fit metrics (SSE, MAPE, MAD, MSD, R2) for the training set.
metrica_vola14_LSTM_train14 = metricas(y_train_vola14_14,train_preds_vola14_LSTM14)
# Relabel the single row with a descriptive name for the summary table.
metrica_vola14_LSTM_train14.index = metrica_vola14_LSTM_train14.index.map({0: 'LSTM Entrenamiento Volatilidada ω = 14 y τ = 14'})
# Attach the residual-diagnostic p-values computed above as extra columns.
metrica_vola14_LSTM_train14['Ljung-Box p-value'] = pd.Series([ljung_box_pval_LSTM_train14_vola14], index=metrica_vola14_LSTM_train14.index)
metrica_vola14_LSTM_train14['Jarque-Bera p-value'] = pd.Series([jarque_bera_pval_LSTM_train14_vola14], index=metrica_vola14_LSTM_train14.index)
metrica_vola14_LSTM_train14
| SSE | MAPE | MAD | MSD | R2 | Ljung-Box p-value | Jarque-Bera p-value | |
|---|---|---|---|---|---|---|---|
| LSTM Entrenamiento Volatilidada ω = 14 y τ = 14 | 1.4431 | 8.83% | 0.0 | 0.0 | 92.6% | 3.1792e-12 | 0.0 |
Conjunto de datos de Prueba:
# Residual diagnostics on the test set.
# Fix: use this section's predictions (test_preds_vola14_LSTM14), not the
# vola7 predictions from the previous section.
ljung_box_pvalLSTM14_vola14, jarque_bera_pvalLSTM14_vola14 = evaluate_residuals(data_test_plot_vola14_14, test_preds_vola14_LSTM14)
Se rechaza H0: hay autocorrelación en los residuales.
No se rechaza H0: los residuales siguen una distribución normal.
# Compute the fit metrics (SSE, MAPE, MAD, MSD, R2) for the test set.
metrica_LSTM_test14_vola14 = metricas(y_test_vola14_14,test_preds_vola14_LSTM14)
# Relabel the single row with a descriptive name for the summary table.
metrica_LSTM_test14_vola14.index = metrica_LSTM_test14_vola14.index.map({0: 'LSTM Prueba Volatilidada ω = 14 y τ = 14'})
# Attach the residual-diagnostic p-values computed above as extra columns.
metrica_LSTM_test14_vola14['Ljung-Box p-value'] = pd.Series([ljung_box_pvalLSTM14_vola14], index=metrica_LSTM_test14_vola14.index)
metrica_LSTM_test14_vola14['Jarque-Bera p-value'] = pd.Series([jarque_bera_pvalLSTM14_vola14], index=metrica_LSTM_test14_vola14.index)
metrica_LSTM_test14_vola14
| SSE | MAPE | MAD | MSD | R2 | Ljung-Box p-value | Jarque-Bera p-value | |
|---|---|---|---|---|---|---|---|
| LSTM Prueba Volatilidada ω = 14 y τ = 14 | 0.0002 | 8.62% | 0.0 | 0.0 | 71.54% | 0.0408 | 0.4244 |
Curva Runs vs Error/Score :
# Plot the validation-loss curve per epoch for the best model's history.
plot_best_model_validation_loss(history_vola14_LSTM14)
Boxplot Errores Conjunto de Entrenamiento, Validación y Prueba :
# Boxplots of the prediction errors on the train, validation and test sets.
errores_plots(y_train_vola14_14, train_preds_vola14_LSTM14, y_val_vola14_14, val_preds_vola14_LSTM14, y_test_vola14_14, test_preds_vola14_LSTM14)
De acuerdo a los resultados obtenidos del gráfico de Run vs Error/Score y al compararlo con el gráfico boxplot de los residuales se observa lo siguiente:
Conjunto de Entrenamiento: La media de los residuales se encuentra cercana al error/score correspondiente al val_loss de la época del mejor modelo; además muestra un sesgo considerable a la derecha y una alta variabilidad.
Conjunto de Validación: La media de los residuales se encuentra alejada del error/score correspondiente al val_loss de la época del mejor modelo.
Conjunto de Prueba: La media de los residuales se encuentra alejada del error/score correspondiente al val_loss de la época del mejor modelo.
Lo anterior es coherente con las métricas obtenidas para el modelo implementado.
Horizonte de 21 días (\(\tau=21\))#
Indexación de parámetros para cambio de dimensión de X de 2D a 3D.
# Reshape X from 2D (samples, 21) to 3D (samples, 21, 1) as required by LSTM layers.
X_train_vola14_lstm_21, X_val_vola14_lstm_21, X_test_vola14_lstm_21 = change_dimension_lstm(X_train_vola14_21, X_val_vola14_21, X_test_vola14_21)
Shape of 3D arrays X: (4915, 21, 1) (21, 21, 1) (21, 21, 1)
A continuación, se lleva a cabo la búsqueda de los hiperparámetros que optimizan nuestro modelo utilizando Keras y GridSearch.
from sklearn.metrics import make_scorer, mean_absolute_error

os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'  # silence TensorFlow info/warning logs


# Define the LSTM network architecture
def create_lstm_model(optimizer, activation):
    """Build and compile the two-layer LSTM model for the 21-day horizon.

    Parameters:
        optimizer: optimizer name or instance used to compile the model.
        activation: activation function for the LSTM layers. Fix: this
            argument was previously ignored, which made the grid search
            over activations meaningless.

    Returns:
        A compiled Keras Model mapping (21, 1) inputs to a single output.
    """
    input_layer_lstm = Input(shape=(21, 1), dtype='float32')
    lstm_layer1 = LSTM(64, activation=activation, return_sequences=True)(input_layer_lstm)
    lstm_layer2 = LSTM(32, activation=activation, return_sequences=False)(lstm_layer1)
    dropout_layer_lstm = Dropout(0.2)(lstm_layer2)
    output_layer_lstm = Dense(1, activation='linear')(dropout_layer_lstm)
    ts_model_lstm = Model(inputs=input_layer_lstm, outputs=output_layer_lstm)
    ts_model_lstm.compile(loss='mean_absolute_error', optimizer=optimizer)
    return ts_model_lstm


# Hyperparameter grid, reduced for runtime; the comments show the full search space.
param_grid = {'activation': ['relu'],  # full list: ['relu', 'tanh', 'sigmoid']
              'epochs': [20],          # full list: [20, 50, 100, 150]
              'optimizer': ['SGD']     # full list: ['SGD', 'RMSprop', 'Adam']
              }
# Grid-search configuration: 5-fold CV scored with MAE.
# Fix: GridSearchCV maximizes the score, so an error metric must be wrapped
# with greater_is_better=False; otherwise the WORST candidate is selected.
# (best_score_ is then reported with a flipped sign.)
scoring = make_scorer(mean_absolute_error, greater_is_better=False)
model = KerasRegressor(build_fn=create_lstm_model, epochs=10, batch_size=16, verbose=0)
grid = GridSearchCV(estimator=model, param_grid=param_grid, cv=KFold(n_splits=5, shuffle=True), scoring=scoring, n_jobs=-1, verbose=2)
grid_result = grid.fit(X_train_vola14_lstm_21, y_train_vola14_21)
# Grid-search results
print(f"Mejor función de activación: {grid_result.best_params_['activation']}")
print(f"Mejor número de epocas: {grid_result.best_params_['epochs']}")
print(f"Mejor Longitud de paso μ (optimizer): {grid_result.best_params_['optimizer']}")
print(f"Mejor Puntuación: {grid_result.best_score_}")
Fitting 5 folds for each of 1 candidates, totalling 5 fits
[CV] END ..........activation=relu, epochs=20, optimizer=SGD; total time= 36.4s
[CV] END ..........activation=relu, epochs=20, optimizer=SGD; total time= 36.5s
[CV] END ..........activation=relu, epochs=20, optimizer=SGD; total time= 36.5s
[CV] END ..........activation=relu, epochs=20, optimizer=SGD; total time= 37.3s
[CV] END ..........activation=relu, epochs=20, optimizer=SGD; total time= 37.6s
Mejor función de activación: relu
Mejor número de epocas: 20
Mejor Longitud de paso μ (optimizer): SGD
Mejor Puntuación: 0.016279017224494513
El modelo de predicción de series temporales se basa en una red neuronal recurrente que incluye una capa de entrada que se conecta a una capa LSTM. Esta capa LSTM considera 21 pasos temporales, equivalentes al número de datos históricos. La salida de la LSTM se genera únicamente a partir del último paso temporal. Cada uno de los n pasos temporales definidos en el shape (n,) cuenta con 32 neuronas ocultas.
La cantidad de pasos temporales (timesteps) y 1 indica el número de características (features) por cada paso temporal. Se utiliza return_sequences=True cuando es necesario enviar la secuencia completa de salidas a la capa siguiente, que puede ser otra capa recurrente, para este caso otra capa LSTM.
La salida de LSTM pasa a una capa de exclusión que elimina aleatoriamente el 20%, 40%, 60% y 80% de la entrada antes de pasar a la capa de salida, que tiene una única neurona oculta con una función de activación lineal
Indexación de parámetros de la función build_models_lstm con base a lo indicado en el numeral 3 del parcial práctico.
# Hyperparameter sweep for the 21-day horizon: four "neuron" settings and
# four dropout rates -> 16 LSTM models built with the SGD optimizer.
# NOTE(review): the printed summaries all show an identical 64/32-unit
# architecture regardless of neurons_list — confirm build_models_lstm
# actually uses these values.
input_shape21 = 21
neurons_list = [10, 100, 1000, 10000]
dropout_rates = [0.2, 0.4, 0.6, 0.8]
models_LSTM21_vola14 = build_models_lstm(input_shape21, neurons_list, dropout_rates, 'SGD')
Model: "model_511"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_512 (InputLayer) [(None, 21, 1)] 0
lstm_478 (LSTM) (None, 21, 64) 16896
lstm_479 (LSTM) (None, 32) 12416
dropout_511 (Dropout) (None, 32) 0
dense_1327 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10 neuronas y dropout 0.2:
Model: "model_511"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_512 (InputLayer) [(None, 21, 1)] 0
lstm_478 (LSTM) (None, 21, 64) 16896
lstm_479 (LSTM) (None, 32) 12416
dropout_511 (Dropout) (None, 32) 0
dense_1327 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_512"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_513 (InputLayer) [(None, 21, 1)] 0
lstm_480 (LSTM) (None, 21, 64) 16896
lstm_481 (LSTM) (None, 32) 12416
dropout_512 (Dropout) (None, 32) 0
dense_1328 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10 neuronas y dropout 0.4:
Model: "model_512"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_513 (InputLayer) [(None, 21, 1)] 0
lstm_480 (LSTM) (None, 21, 64) 16896
lstm_481 (LSTM) (None, 32) 12416
dropout_512 (Dropout) (None, 32) 0
dense_1328 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_513"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_514 (InputLayer) [(None, 21, 1)] 0
lstm_482 (LSTM) (None, 21, 64) 16896
lstm_483 (LSTM) (None, 32) 12416
dropout_513 (Dropout) (None, 32) 0
dense_1329 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10 neuronas y dropout 0.6:
Model: "model_513"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_514 (InputLayer) [(None, 21, 1)] 0
lstm_482 (LSTM) (None, 21, 64) 16896
lstm_483 (LSTM) (None, 32) 12416
dropout_513 (Dropout) (None, 32) 0
dense_1329 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_514"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_515 (InputLayer) [(None, 21, 1)] 0
lstm_484 (LSTM) (None, 21, 64) 16896
lstm_485 (LSTM) (None, 32) 12416
dropout_514 (Dropout) (None, 32) 0
dense_1330 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10 neuronas y dropout 0.8:
Model: "model_514"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_515 (InputLayer) [(None, 21, 1)] 0
lstm_484 (LSTM) (None, 21, 64) 16896
lstm_485 (LSTM) (None, 32) 12416
dropout_514 (Dropout) (None, 32) 0
dense_1330 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_515"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_516 (InputLayer) [(None, 21, 1)] 0
lstm_486 (LSTM) (None, 21, 64) 16896
lstm_487 (LSTM) (None, 32) 12416
dropout_515 (Dropout) (None, 32) 0
dense_1331 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 100 neuronas y dropout 0.2:
Model: "model_515"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_516 (InputLayer) [(None, 21, 1)] 0
lstm_486 (LSTM) (None, 21, 64) 16896
lstm_487 (LSTM) (None, 32) 12416
dropout_515 (Dropout) (None, 32) 0
dense_1331 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_516"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_517 (InputLayer) [(None, 21, 1)] 0
lstm_488 (LSTM) (None, 21, 64) 16896
lstm_489 (LSTM) (None, 32) 12416
dropout_516 (Dropout) (None, 32) 0
dense_1332 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 100 neuronas y dropout 0.4:
Model: "model_516"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_517 (InputLayer) [(None, 21, 1)] 0
lstm_488 (LSTM) (None, 21, 64) 16896
lstm_489 (LSTM) (None, 32) 12416
dropout_516 (Dropout) (None, 32) 0
dense_1332 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_517"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_518 (InputLayer) [(None, 21, 1)] 0
lstm_490 (LSTM) (None, 21, 64) 16896
lstm_491 (LSTM) (None, 32) 12416
dropout_517 (Dropout) (None, 32) 0
dense_1333 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 100 neuronas y dropout 0.6:
Model: "model_517"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_518 (InputLayer) [(None, 21, 1)] 0
lstm_490 (LSTM) (None, 21, 64) 16896
lstm_491 (LSTM) (None, 32) 12416
dropout_517 (Dropout) (None, 32) 0
dense_1333 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_518"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_519 (InputLayer) [(None, 21, 1)] 0
lstm_492 (LSTM) (None, 21, 64) 16896
lstm_493 (LSTM) (None, 32) 12416
dropout_518 (Dropout) (None, 32) 0
dense_1334 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 100 neuronas y dropout 0.8:
Model: "model_518"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_519 (InputLayer) [(None, 21, 1)] 0
lstm_492 (LSTM) (None, 21, 64) 16896
lstm_493 (LSTM) (None, 32) 12416
dropout_518 (Dropout) (None, 32) 0
dense_1334 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_519"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_520 (InputLayer) [(None, 21, 1)] 0
lstm_494 (LSTM) (None, 21, 64) 16896
lstm_495 (LSTM) (None, 32) 12416
dropout_519 (Dropout) (None, 32) 0
dense_1335 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 1000 neuronas y dropout 0.2:
Model: "model_519"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_520 (InputLayer) [(None, 21, 1)] 0
lstm_494 (LSTM) (None, 21, 64) 16896
lstm_495 (LSTM) (None, 32) 12416
dropout_519 (Dropout) (None, 32) 0
dense_1335 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_520"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_521 (InputLayer) [(None, 21, 1)] 0
lstm_496 (LSTM) (None, 21, 64) 16896
lstm_497 (LSTM) (None, 32) 12416
dropout_520 (Dropout) (None, 32) 0
dense_1336 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 1000 neuronas y dropout 0.4:
Model: "model_520"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_521 (InputLayer) [(None, 21, 1)] 0
lstm_496 (LSTM) (None, 21, 64) 16896
lstm_497 (LSTM) (None, 32) 12416
dropout_520 (Dropout) (None, 32) 0
dense_1336 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_521"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_522 (InputLayer) [(None, 21, 1)] 0
lstm_498 (LSTM) (None, 21, 64) 16896
lstm_499 (LSTM) (None, 32) 12416
dropout_521 (Dropout) (None, 32) 0
dense_1337 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 1000 neuronas y dropout 0.6:
Model: "model_521"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_522 (InputLayer) [(None, 21, 1)] 0
lstm_498 (LSTM) (None, 21, 64) 16896
lstm_499 (LSTM) (None, 32) 12416
dropout_521 (Dropout) (None, 32) 0
dense_1337 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_522"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_523 (InputLayer) [(None, 21, 1)] 0
lstm_500 (LSTM) (None, 21, 64) 16896
lstm_501 (LSTM) (None, 32) 12416
dropout_522 (Dropout) (None, 32) 0
dense_1338 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 1000 neuronas y dropout 0.8:
Model: "model_522"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_523 (InputLayer) [(None, 21, 1)] 0
lstm_500 (LSTM) (None, 21, 64) 16896
lstm_501 (LSTM) (None, 32) 12416
dropout_522 (Dropout) (None, 32) 0
dense_1338 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_523"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_524 (InputLayer) [(None, 21, 1)] 0
lstm_502 (LSTM) (None, 21, 64) 16896
lstm_503 (LSTM) (None, 32) 12416
dropout_523 (Dropout) (None, 32) 0
dense_1339 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10000 neuronas y dropout 0.2:
Model: "model_523"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_524 (InputLayer) [(None, 21, 1)] 0
lstm_502 (LSTM) (None, 21, 64) 16896
lstm_503 (LSTM) (None, 32) 12416
dropout_523 (Dropout) (None, 32) 0
dense_1339 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_524"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_525 (InputLayer) [(None, 21, 1)] 0
lstm_504 (LSTM) (None, 21, 64) 16896
lstm_505 (LSTM) (None, 32) 12416
dropout_524 (Dropout) (None, 32) 0
dense_1340 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10000 neuronas y dropout 0.4:
Model: "model_524"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_525 (InputLayer) [(None, 21, 1)] 0
lstm_504 (LSTM) (None, 21, 64) 16896
lstm_505 (LSTM) (None, 32) 12416
dropout_524 (Dropout) (None, 32) 0
dense_1340 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_525"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_526 (InputLayer) [(None, 21, 1)] 0
lstm_506 (LSTM) (None, 21, 64) 16896
lstm_507 (LSTM) (None, 32) 12416
dropout_525 (Dropout) (None, 32) 0
dense_1341 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10000 neuronas y dropout 0.6:
Model: "model_525"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_526 (InputLayer) [(None, 21, 1)] 0
lstm_506 (LSTM) (None, 21, 64) 16896
lstm_507 (LSTM) (None, 32) 12416
dropout_525 (Dropout) (None, 32) 0
dense_1341 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_526"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_527 (InputLayer) [(None, 21, 1)] 0
lstm_508 (LSTM) (None, 21, 64) 16896
lstm_509 (LSTM) (None, 32) 12416
dropout_526 (Dropout) (None, 32) 0
dense_1342 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10000 neuronas y dropout 0.8:
Model: "model_526"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_527 (InputLayer) [(None, 21, 1)] 0
lstm_508 (LSTM) (None, 21, 64) 16896
lstm_509 (LSTM) (None, 32) 12416
dropout_526 (Dropout) (None, 32) 0
dense_1342 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Para entrenar el modelo, se utiliza la función fit() en el objeto del modelo, proporcionándole como argumentos X_train y y_train. El proceso de entrenamiento se lleva a cabo a lo largo de un número específico de épocas. Además, el parámetro batch_size especifica cuántas muestras del conjunto de entrenamiento se usarán en cada iteración de retropropagación (backpropagation).
El conjunto de datos de validación se utiliza para evaluar el rendimiento del modelo al finalizar cada época. Se emplea un objeto ModelCheckpoint que monitorea la función de pérdida en el conjunto de validación y almacena el modelo correspondiente a la época en la que esta función alcanza su valor más bajo.
Se define val_loss como el valor de la función de coste para los datos de validación cruzada y loss como el valor de la función de coste para los datos de entrenamiento. save_freq='epoch' indica que el modelo se evaluará y potencialmente se guardará después de cada época.
from tensorflow.keras.callbacks import ModelCheckpoint

# Checkpoint filename encodes the epoch number and the validation loss.
save_weights = os.path.join(
    'keras_models',
    'PRSA_data_vola14_LSTM21_weights.{epoch:02d}-{val_loss:.4f}.keras',
)

# Evaluate at the end of every epoch and keep only the model with the
# lowest validation loss seen so far.
save_best21_lstm_vola14 = ModelCheckpoint(
    save_weights,
    monitor='val_loss',
    mode='min',
    save_best_only=True,
    save_weights_only=False,
    save_freq='epoch',
    verbose=2,
)
A continuación se itera sobre cada uno de los modelos del objeto models_LSTM21.
import os
from joblib import dump, load

history_vola14_LSTM21 = []
# Train (or reload) every candidate model in models_LSTM21_vola14.
for i, model in enumerate(models_LSTM21_vola14):
    filename = f'history_vola14_LSTM21_model_{i}.joblib'
    if os.path.exists(filename):
        # A previous run already trained this model: reuse the saved history.
        model_history = load(filename)
        # BUG FIX: the f-string printed the literal "(unknown)" instead of the
        # actual file name; restored the {filename} placeholder.
        print(f"El archivo '{filename}' ya existe. Se ha cargado el historial del entrenamiento.")
    else:
        model_history = model.fit(x=X_train_vola14_lstm_21, y=y_train_vola14_21, batch_size=16, epochs=20,
                                  verbose=2, callbacks=[save_best21_lstm_vola14],
                                  validation_data=(X_val_vola14_lstm_21, y_val_vola14_21),
                                  shuffle=True)
        dump(model_history.history, filename)
        print(f"El entrenamiento del modelo {i + 1} se ha completado y el historial ha sido guardado en '{filename}'.")
    # Store a plain dict in both branches (load() returns a dict,
    # fit() returns a keras History object).
    history_vola14_LSTM21.append(model_history if isinstance(model_history, dict) else model_history.history)
El archivo 'history_vola14_LSTM21_model_0.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola14_LSTM21_model_1.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola14_LSTM21_model_2.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola14_LSTM21_model_3.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola14_LSTM21_model_4.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola14_LSTM21_model_5.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola14_LSTM21_model_6.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola14_LSTM21_model_7.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola14_LSTM21_model_8.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola14_LSTM21_model_9.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola14_LSTM21_model_10.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola14_LSTM21_model_11.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola14_LSTM21_model_12.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola14_LSTM21_model_13.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola14_LSTM21_model_14.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola14_LSTM21_model_15.joblib' ya existe. Se ha cargado el historial del entrenamiento.
En este caso, el parámetro verbose=2 indica que se mostrará una línea por cada época durante el entrenamiento. Los valores posibles son: 0 para no mostrar información, 1 para visualizar una barra de progreso y 2 para ver un registro por época.
Además, las muestras se mezclan de manera aleatoria antes de cada época, gracias a la opción shuffle=True.
Una vez completado el entrenamiento, se realizan predicciones sobre el Precio del Bitcoin utilizando los mejores modelos guardados. Las predicciones generadas, que corresponden al Precio del Bitcoin escalado, son transformadas inversamente para recuperar los valores originales. Asimismo, se evalúa la calidad del ajuste mediante el uso de métricas de medición como SSE, MAPE, MAD, MSD y R2.
import os
import re
from tensorflow.keras.models import load_model
import numpy as np

# Directory holding the checkpoints written by ModelCheckpoint.
model_dir = 'keras_models'
files = os.listdir(model_dir)
# Checkpoint names look like: PRSA_data_vola14_LSTM21_weights.<epoch>-<val_loss>.keras
pattern = r"PRSA_data_vola14_LSTM21_weights\.(\d+)-([\d\.]+)\.keras"
best_val_loss = float('inf')
best_model_file = None
best21_lstm_vola14 = None

# Scan every checkpoint file and remember the one with the lowest val_loss.
# (The epoch number in group(1) is not needed for the selection, so it is
# not extracted — the original bound it to an unused local.)
for file in files:
    match = re.match(pattern, file)
    if match:
        val_loss = float(match.group(2))  # validation loss encoded in the name
        if val_loss < best_val_loss:
            best_val_loss = val_loss
            best_model_file = file  # current best checkpoint

# Load the winning checkpoint, if any file matched the pattern.
if best_model_file:
    best_model_path = os.path.join(model_dir, best_model_file)
    print(f"Cargando el mejor modelo: {best_model_file} con val_loss: {best_val_loss}")
    best21_lstm_vola14 = load_model(best_model_path)  # load the full model
    if best21_lstm_vola14 is None:
        print("Error: No se pudo cargar el mejor modelo.")
else:
    print("No se encontraron archivos de modelos que coincidan con el patrón.")
Cargando el mejor modelo: PRSA_data_vola14_LSTM21_weights.11-0.0011.keras con val_loss: 0.0011
A continuación estimamos las predicciones del modelo LSTM para la época en donde la función de pérdida alcanza su valor más bajo.
# Predict on the three splits with the checkpointed best model, if it loaded.
if best21_lstm_vola14 is None:
    print("No se puede realizar predicciones porque el mejor modelo no se ha cargado.")
else:
    # predict() returns shape (n, 1); squeeze flattens it to (n,).
    train_preds_vola14_LSTM21 = np.squeeze(best21_lstm_vola14.predict(X_train_vola14_lstm_21))
    val_preds_vola14_LSTM21 = np.squeeze(best21_lstm_vola14.predict(X_val_vola14_lstm_21))
    test_preds_vola14_LSTM21 = np.squeeze(best21_lstm_vola14.predict(X_test_vola14_lstm_21))
    # Show the (scaled) predictions for each split.
    print("Predicciones de Entrenamiento:", train_preds_vola14_LSTM21)
    print("Predicciones de validación:", val_preds_vola14_LSTM21)
    print("Predicciones de prueba:", test_preds_vola14_LSTM21)
154/154 [==============================] - 1s 2ms/step
1/1 [==============================] - 0s 9ms/step
1/1 [==============================] - 0s 9ms/step
Predicciones de Entrenamiento: [0.00079217 0.00079217 0.00079217 ... 0.03132449 0.03146943 0.03153235]
Predicciones de validación: [0.03155167 0.02620051 0.02651863 0.02620691 0.02633044 0.02326353
0.02333171 0.02228436 0.02321499 0.02308543 0.02312818 0.02112993
0.02102985 0.02110556 0.02135816 0.01587905 0.01573933 0.01685816
0.01705712 0.01611753 0.01615214]
Predicciones de prueba: [0.01584165 0.01629933 0.0160421 0.01698925 0.01730338 0.01724389
0.01766933 0.01646668 0.0170863 0.01708292 0.01742684 0.01806495
0.01642688 0.01650076 0.01642139 0.01949279 0.02200836 0.03063027
0.03207866 0.03207545 0.03174646]
# BUG FIX: this section models volatility with ω = 14, but the original call
# passed the ω = 7 predictions (val_preds_vola7_LSTM21 / test_preds_vola7_LSTM21);
# it now plots the ω = 14 predictions computed above.
# NOTE(review): the data_*_plot_vola21 series are kept as in the original —
# confirm they are the intended actual-value series for this plot.
plot_model(data_train_plot_vola21[-100:], data_val_plot_vola21, data_test_plot_vola21, val_preds_vola14_LSTM21, test_preds_vola14_LSTM21, "Predicciones usando Memoria a Corto y Largo Paso (LSTM)")
A continuación realizamos un análisis sobre los residuales y rendimiento de nuestro modelo con relación a nuestro conjunto de Entrenamiento(rendimiento) y test (rendimiento y residuales).
Conjunto de datos de Entrenamiento
A continuación se realiza el análisis de los residuales para el conjunto de entrenamiento.
# Residual diagnostics on the training split (actuals vs. best-model
# predictions); returns the Ljung-Box and Jarque-Bera p-values used below.
ljung_box_pval_LSTM_train21_vola14, jarque_bera_pval_LSTM_train21_vola14 = diagnostic_plots(y_train_vola14_21, train_preds_vola14_LSTM21)
Ljung-Box LB Statistic: 24.029641
Ljung-Box p-value: 0.000001
Se rechaza H0: hay autocorrelación en los residuales.
Jarque-Bera p-value: 0.000000
Se rechaza H0: los residuales no siguen una distribución normal.
# Compute the fit metrics for the training split, give the single row a
# descriptive label, and attach the residual-test p-values as extra columns.
metrica_vola14_LSTM_train21 = metricas(y_train_vola14_21, train_preds_vola14_LSTM21)
metrica_vola14_LSTM_train21.index = metrica_vola14_LSTM_train21.index.map(
    {0: 'LSTM Entrenamiento Volatilidad ω = 14 y τ = 21'})
idx_train21_vola14 = metrica_vola14_LSTM_train21.index
metrica_vola14_LSTM_train21['Ljung-Box p-value'] = pd.Series(
    [ljung_box_pval_LSTM_train21_vola14], index=idx_train21_vola14)
metrica_vola14_LSTM_train21['Jarque-Bera p-value'] = pd.Series(
    [jarque_bera_pval_LSTM_train21_vola14], index=idx_train21_vola14)
metrica_vola14_LSTM_train21
| SSE | MAPE | MAD | MSD | R2 | Ljung-Box p-value | Jarque-Bera p-value | |
|---|---|---|---|---|---|---|---|
| LSTM Entrenamiento Volatilidad ω = 14 y τ = 21 | 1.3849 | 7.52% | 0.0 | 0.0 | 92.88% | 9.4864e-07 | 0.0 |
Conjunto de datos de Prueba:
# Residual diagnostics on the test split (Ljung-Box autocorrelation test and
# Jarque-Bera normality test); the returned p-values feed the table below.
ljung_box_pvalLSTM21_vola14, jarque_bera_pvalLSTM21_vola14 = evaluate_residuals(data_test_plot_vola14_21, test_preds_vola14_LSTM21)
Se rechaza H0: hay autocorrelación en los residuales.
No se rechaza H0: los residuales siguen una distribución normal.
# Test-split metrics for the ω = 14, τ = 21 LSTM model.
metrica_LSTM_test21_vola14 = metricas(y_test_vola14_21, test_preds_vola14_LSTM21)
metrica_LSTM_test21_vola14.index = metrica_LSTM_test21_vola14.index.map({0: 'LSTM Prueba Volatilidad ω = 14 y τ = 21'})
# BUG FIX: the original attached the ω = 7 p-values (ljung_box_pvalLSTM21_vola7 /
# jarque_bera_pvalLSTM21_vola7) from another section; use the ω = 14 values
# computed by evaluate_residuals for this section instead.
metrica_LSTM_test21_vola14['Ljung-Box p-value'] = pd.Series([ljung_box_pvalLSTM21_vola14], index=metrica_LSTM_test21_vola14.index)
metrica_LSTM_test21_vola14['Jarque-Bera p-value'] = pd.Series([jarque_bera_pvalLSTM21_vola14], index=metrica_LSTM_test21_vola14.index)
metrica_LSTM_test21_vola14
| SSE | MAPE | MAD | MSD | R2 | Ljung-Box p-value | Jarque-Bera p-value | |
|---|---|---|---|---|---|---|---|
| LSTM Prueba Volatilidad ω = 14 y τ = 21 | 0.0001 | 4.92% | 0.0 | 0.0 | 87.77% | 0.0013 | 0.3839 |
Horizonte de 28 días (\(\tau=28\))#
Indexación de parámetros para cambio de dimensión de X de 2D a 3D.
# Reshape the 2D feature matrices into the 3D (samples, timesteps, features)
# layout that Keras LSTM layers expect, for the τ = 28 horizon.
X_train_vola14_lstm_28, X_val_vola14_lstm_28, X_test_vola14_lstm_28 = change_dimension_lstm(X_train_vola14_28, X_val_vola14_28, X_test_vola14_28)
Shape of 3D arrays X: (4887, 28, 1) (28, 28, 1) (28, 28, 1)
A continuación, se lleva a cabo la búsqueda de los hiperparámetros que optimizan nuestro modelo utilizando Keras y GridSearch.
from sklearn.metrics import make_scorer, mean_absolute_error

os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'  # silence TensorFlow info/warning logs

# Define the LSTM network architecture
def create_lstm_model(optimizer, activation):
    """Build a two-layer LSTM regressor for 28-step input windows.

    Parameters
    ----------
    optimizer : optimizer name or instance used to compile the model.
    activation : activation function applied to both LSTM layers
        (the output layer stays 'linear', as usual for regression).
    """
    input_layer_lstm = Input(shape=(28, 1), dtype='float32')
    # BUG FIX: the original ignored the `activation` argument, so the grid
    # search over activations had no effect on the model; it is now wired
    # into both LSTM layers.
    lstm_layer1 = LSTM(64, activation=activation, return_sequences=True)(input_layer_lstm)
    lstm_layer2 = LSTM(32, activation=activation, return_sequences=False)(lstm_layer1)
    dropout_layer_lstm = Dropout(0.2)(lstm_layer2)
    output_layer_lstm = Dense(1, activation='linear')(dropout_layer_lstm)
    ts_model_lstm = Model(inputs=input_layer_lstm, outputs=output_layer_lstm)
    ts_model_lstm.compile(loss='mean_absolute_error', optimizer=optimizer)
    return ts_model_lstm

# Hyperparameter grid (reduced for runtime; the full grids are in comments).
param_grid = {'activation': ['relu'],  # ['relu', 'tanh', 'sigmoid']
              'epochs': [20],          # [20, 50, 100, 150]
              'optimizer': ['SGD']     # ['SGD', 'RMSprop', 'Adam']
              }

# Grid-search configuration.
# BUG FIX: make_scorer defaults to greater_is_better=True, which would make
# GridSearchCV *maximize* the MAE; the error must be minimized, so the scorer
# is negated (best_score_ is therefore reported as a negative value).
scoring = make_scorer(mean_absolute_error, greater_is_better=False)
model = KerasRegressor(build_fn=create_lstm_model, epochs=10, batch_size=16, verbose=0)
grid = GridSearchCV(estimator=model, param_grid=param_grid, cv=KFold(n_splits=5, shuffle=True),
                    scoring=scoring, n_jobs=-1, verbose=2)
grid_result = grid.fit(X_train_vola14_lstm_28, y_train_vola14_28)

# Resultados del Grid Search
print(f"Mejor función de activación: {grid_result.best_params_['activation']}")
print(f"Mejor número de epocas: {grid_result.best_params_['epochs']}")
print(f"Mejor Longitud de paso μ (optimizer): {grid_result.best_params_['optimizer']}")
print(f"Mejor Puntuación: {grid_result.best_score_}")
Fitting 5 folds for each of 1 candidates, totalling 5 fits
[CV] END ..........activation=relu, epochs=20, optimizer=SGD; total time= 48.2s
[CV] END ..........activation=relu, epochs=20, optimizer=SGD; total time= 48.3s
[CV] END ..........activation=relu, epochs=20, optimizer=SGD; total time= 48.6s
[CV] END ..........activation=relu, epochs=20, optimizer=SGD; total time= 48.6s
[CV] END ..........activation=relu, epochs=20, optimizer=SGD; total time= 48.7s
Mejor función de activación: relu
Mejor número de epocas: 20
Mejor Longitud de paso μ (optimizer): SGD
Mejor Puntuación: 0.018750487748698468
El modelo de predicción de series temporales se basa en una red neuronal recurrente que incluye una capa de entrada que se conecta a una capa LSTM. Esta capa LSTM considera 28 pasos temporales, equivalentes al número de datos históricos. La salida de la LSTM se genera únicamente a partir del último paso temporal. Cada uno de los n pasos temporales definidos en el shape (n,) cuenta con 32 neuronas ocultas.
La cantidad de pasos temporales (timesteps) y 1 indica el número de características (features) por cada paso temporal. Se utiliza return_sequences=True cuando es necesario enviar la secuencia completa de salidas a la capa siguiente, que puede ser otra capa recurrente, para este caso otra capa LSTM.
La salida de LSTM pasa a una capa de exclusión que elimina aleatoriamente el 20%, 40%, 60% y 80% de la entrada antes de pasar a la capa de salida, que tiene una única neurona oculta con una función de activación lineal
La indexación de parámetros de la función build_models_lstm se realiza con base en lo indicado en el numeral 3 del parcial práctico.
# Grid of model variants for the τ = 28 horizon (volatility window ω = 14).
# NOTE(review): the printed summaries above all show fixed 64/32-unit LSTM
# layers regardless of `neurons_list`, so the list appears to label/iterate
# variants rather than set layer widths — confirm against build_models_lstm.
input_shape28 = 28  # timesteps per input window
neurons_list = [10, 100, 1000, 10000]
dropout_rates = [0.2, 0.4, 0.6, 0.8]  # dropout probabilities to try
models_LSTM28_vola14 = build_models_lstm(input_shape28, neurons_list, dropout_rates, 'SGD')
Model: "model_528"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_529 (InputLayer) [(None, 28, 1)] 0
lstm_512 (LSTM) (None, 28, 64) 16896
lstm_513 (LSTM) (None, 32) 12416
dropout_528 (Dropout) (None, 32) 0
dense_1344 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10 neuronas y dropout 0.2:
Model: "model_528"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_529 (InputLayer) [(None, 28, 1)] 0
lstm_512 (LSTM) (None, 28, 64) 16896
lstm_513 (LSTM) (None, 32) 12416
dropout_528 (Dropout) (None, 32) 0
dense_1344 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_529"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_530 (InputLayer) [(None, 28, 1)] 0
lstm_514 (LSTM) (None, 28, 64) 16896
lstm_515 (LSTM) (None, 32) 12416
dropout_529 (Dropout) (None, 32) 0
dense_1345 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10 neuronas y dropout 0.4:
Model: "model_529"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_530 (InputLayer) [(None, 28, 1)] 0
lstm_514 (LSTM) (None, 28, 64) 16896
lstm_515 (LSTM) (None, 32) 12416
dropout_529 (Dropout) (None, 32) 0
dense_1345 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_530"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_531 (InputLayer) [(None, 28, 1)] 0
lstm_516 (LSTM) (None, 28, 64) 16896
lstm_517 (LSTM) (None, 32) 12416
dropout_530 (Dropout) (None, 32) 0
dense_1346 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10 neuronas y dropout 0.6:
Model: "model_530"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_531 (InputLayer) [(None, 28, 1)] 0
lstm_516 (LSTM) (None, 28, 64) 16896
lstm_517 (LSTM) (None, 32) 12416
dropout_530 (Dropout) (None, 32) 0
dense_1346 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_531"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_532 (InputLayer) [(None, 28, 1)] 0
lstm_518 (LSTM) (None, 28, 64) 16896
lstm_519 (LSTM) (None, 32) 12416
dropout_531 (Dropout) (None, 32) 0
dense_1347 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10 neuronas y dropout 0.8:
Model: "model_531"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_532 (InputLayer) [(None, 28, 1)] 0
lstm_518 (LSTM) (None, 28, 64) 16896
lstm_519 (LSTM) (None, 32) 12416
dropout_531 (Dropout) (None, 32) 0
dense_1347 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_532"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_533 (InputLayer) [(None, 28, 1)] 0
lstm_520 (LSTM) (None, 28, 64) 16896
lstm_521 (LSTM) (None, 32) 12416
dropout_532 (Dropout) (None, 32) 0
dense_1348 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 100 neuronas y dropout 0.2:
Model: "model_532"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_533 (InputLayer) [(None, 28, 1)] 0
lstm_520 (LSTM) (None, 28, 64) 16896
lstm_521 (LSTM) (None, 32) 12416
dropout_532 (Dropout) (None, 32) 0
dense_1348 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_533"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_534 (InputLayer) [(None, 28, 1)] 0
lstm_522 (LSTM) (None, 28, 64) 16896
lstm_523 (LSTM) (None, 32) 12416
dropout_533 (Dropout) (None, 32) 0
dense_1349 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 100 neuronas y dropout 0.4:
Model: "model_533"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_534 (InputLayer) [(None, 28, 1)] 0
lstm_522 (LSTM) (None, 28, 64) 16896
lstm_523 (LSTM) (None, 32) 12416
dropout_533 (Dropout) (None, 32) 0
dense_1349 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_534"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_535 (InputLayer) [(None, 28, 1)] 0
lstm_524 (LSTM) (None, 28, 64) 16896
lstm_525 (LSTM) (None, 32) 12416
dropout_534 (Dropout) (None, 32) 0
dense_1350 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 100 neuronas y dropout 0.6:
Model: "model_534"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_535 (InputLayer) [(None, 28, 1)] 0
lstm_524 (LSTM) (None, 28, 64) 16896
lstm_525 (LSTM) (None, 32) 12416
dropout_534 (Dropout) (None, 32) 0
dense_1350 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_535"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_536 (InputLayer) [(None, 28, 1)] 0
lstm_526 (LSTM) (None, 28, 64) 16896
lstm_527 (LSTM) (None, 32) 12416
dropout_535 (Dropout) (None, 32) 0
dense_1351 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 100 neuronas y dropout 0.8:
Model: "model_535"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_536 (InputLayer) [(None, 28, 1)] 0
lstm_526 (LSTM) (None, 28, 64) 16896
lstm_527 (LSTM) (None, 32) 12416
dropout_535 (Dropout) (None, 32) 0
dense_1351 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_536"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_537 (InputLayer) [(None, 28, 1)] 0
lstm_528 (LSTM) (None, 28, 64) 16896
lstm_529 (LSTM) (None, 32) 12416
dropout_536 (Dropout) (None, 32) 0
dense_1352 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 1000 neuronas y dropout 0.2:
Model: "model_536"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_537 (InputLayer) [(None, 28, 1)] 0
lstm_528 (LSTM) (None, 28, 64) 16896
lstm_529 (LSTM) (None, 32) 12416
dropout_536 (Dropout) (None, 32) 0
dense_1352 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_537"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_538 (InputLayer) [(None, 28, 1)] 0
lstm_530 (LSTM) (None, 28, 64) 16896
lstm_531 (LSTM) (None, 32) 12416
dropout_537 (Dropout) (None, 32) 0
dense_1353 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 1000 neuronas y dropout 0.4:
Model: "model_537"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_538 (InputLayer) [(None, 28, 1)] 0
lstm_530 (LSTM) (None, 28, 64) 16896
lstm_531 (LSTM) (None, 32) 12416
dropout_537 (Dropout) (None, 32) 0
dense_1353 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_538"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_539 (InputLayer) [(None, 28, 1)] 0
lstm_532 (LSTM) (None, 28, 64) 16896
lstm_533 (LSTM) (None, 32) 12416
dropout_538 (Dropout) (None, 32) 0
dense_1354 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 1000 neuronas y dropout 0.6:
Model: "model_538"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_539 (InputLayer) [(None, 28, 1)] 0
lstm_532 (LSTM) (None, 28, 64) 16896
lstm_533 (LSTM) (None, 32) 12416
dropout_538 (Dropout) (None, 32) 0
dense_1354 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_539"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_540 (InputLayer) [(None, 28, 1)] 0
lstm_534 (LSTM) (None, 28, 64) 16896
lstm_535 (LSTM) (None, 32) 12416
dropout_539 (Dropout) (None, 32) 0
dense_1355 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 1000 neuronas y dropout 0.8:
Model: "model_539"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_540 (InputLayer) [(None, 28, 1)] 0
lstm_534 (LSTM) (None, 28, 64) 16896
lstm_535 (LSTM) (None, 32) 12416
dropout_539 (Dropout) (None, 32) 0
dense_1355 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_540"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_541 (InputLayer) [(None, 28, 1)] 0
lstm_536 (LSTM) (None, 28, 64) 16896
lstm_537 (LSTM) (None, 32) 12416
dropout_540 (Dropout) (None, 32) 0
dense_1356 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10000 neuronas y dropout 0.2:
Model: "model_540"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_541 (InputLayer) [(None, 28, 1)] 0
lstm_536 (LSTM) (None, 28, 64) 16896
lstm_537 (LSTM) (None, 32) 12416
dropout_540 (Dropout) (None, 32) 0
dense_1356 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_541"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_542 (InputLayer) [(None, 28, 1)] 0
lstm_538 (LSTM) (None, 28, 64) 16896
lstm_539 (LSTM) (None, 32) 12416
dropout_541 (Dropout) (None, 32) 0
dense_1357 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10000 neuronas y dropout 0.4:
Model: "model_541"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_542 (InputLayer) [(None, 28, 1)] 0
lstm_538 (LSTM) (None, 28, 64) 16896
lstm_539 (LSTM) (None, 32) 12416
dropout_541 (Dropout) (None, 32) 0
dense_1357 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_542"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_543 (InputLayer) [(None, 28, 1)] 0
lstm_540 (LSTM) (None, 28, 64) 16896
lstm_541 (LSTM) (None, 32) 12416
dropout_542 (Dropout) (None, 32) 0
dense_1358 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10000 neuronas y dropout 0.6:
Model: "model_542"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_543 (InputLayer) [(None, 28, 1)] 0
lstm_540 (LSTM) (None, 28, 64) 16896
lstm_541 (LSTM) (None, 32) 12416
dropout_542 (Dropout) (None, 32) 0
dense_1358 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_543"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_544 (InputLayer) [(None, 28, 1)] 0
lstm_542 (LSTM) (None, 28, 64) 16896
lstm_543 (LSTM) (None, 32) 12416
dropout_543 (Dropout) (None, 32) 0
dense_1359 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10000 neuronas y dropout 0.8:
Model: "model_543"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_544 (InputLayer) [(None, 28, 1)] 0
lstm_542 (LSTM) (None, 28, 64) 16896
lstm_543 (LSTM) (None, 32) 12416
dropout_543 (Dropout) (None, 32) 0
dense_1359 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Para entrenar el modelo, se utiliza la función fit() en el objeto del modelo, proporcionándole como argumentos X_train y y_train. El proceso de entrenamiento se lleva a cabo a lo largo de un número específico de épocas. Además, el parámetro batch_size especifica cuántas muestras del conjunto de entrenamiento se usarán en cada iteración de retropropagación (backpropagation).
El conjunto de datos de validación se utiliza para evaluar el rendimiento del modelo al finalizar cada época. Se emplea un objeto ModelCheckpoint que monitorea la función de pérdida en el conjunto de validación y almacena el modelo correspondiente a la época en la que esta función alcanza su valor más bajo.
Se define val_loss como el valor de la función de coste para los datos de validación cruzada y loss como el valor de la función de coste para los datos de entrenamiento. save_freq='epoch' indica que el modelo se evaluará y potencialmente se guardará después de cada época.
from tensorflow.keras.callbacks import ModelCheckpoint

# Checkpoint callback: after every epoch, persist the model only when its
# validation loss improves on the best value seen so far (mode='min').
save_weights = os.path.join(
    'keras_models',
    'PRSA_data_vola14_LSTM28_weights.{epoch:02d}-{val_loss:.4f}.keras',
)
save_best28_lstm_vola14 = ModelCheckpoint(
    save_weights,
    monitor='val_loss',
    verbose=2,
    save_best_only=True,
    save_weights_only=False,
    mode='min',
    save_freq='epoch',
)
A continuación se itera sobre cada uno de los modelos del objeto models_LSTM28_vola14.
import os
from joblib import dump, load

# Training histories (one dict per model), in the same order as models_LSTM28_vola14.
history_vola14_LSTM28 = []
# Train each LSTM configuration, or reload its saved history if a previous
# run already produced it (makes the notebook cell re-runnable).
for i, model in enumerate(models_LSTM28_vola14):
    filename = f'history_vola14_LSTM28_model_{i}.joblib'
    if os.path.exists(filename):
        # Already trained: the joblib file holds the plain history dict.
        model_history = load(filename)
        # FIX: the message printed the literal '(unknown)' instead of the file name.
        print(f"El archivo '{filename}' ya existe. Se ha cargado el historial del entrenamiento.")
    else:
        model_history = model.fit(x=X_train_vola14_lstm_28, y=y_train_vola14_28, batch_size=16, epochs=20,
                                  verbose=2, callbacks=[save_best28_lstm_vola14],
                                  validation_data=(X_val_vola14_lstm_28, y_val_vola14_28),
                                  shuffle=True)
        # Persist only the history dict (the History object itself is not picklable-friendly).
        dump(model_history.history, filename)
        print(f"El entrenamiento del modelo {i + 1} se ha completado y el historial ha sido guardado en '{filename}'.")
    # Normalize: loaded histories are already dicts, fresh fits carry .history.
    history_vola14_LSTM28.append(model_history if isinstance(model_history, dict) else model_history.history)
El archivo 'history_vola14_LSTM28_model_0.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola14_LSTM28_model_1.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola14_LSTM28_model_2.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola14_LSTM28_model_3.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola14_LSTM28_model_4.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola14_LSTM28_model_5.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola14_LSTM28_model_6.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola14_LSTM28_model_7.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola14_LSTM28_model_8.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola14_LSTM28_model_9.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola14_LSTM28_model_10.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola14_LSTM28_model_11.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola14_LSTM28_model_12.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola14_LSTM28_model_13.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola14_LSTM28_model_14.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola14_LSTM28_model_15.joblib' ya existe. Se ha cargado el historial del entrenamiento.
En este caso, el parámetro verbose=2 indica que se mostrará una línea por cada época durante el entrenamiento. Los valores posibles son: 0 para no mostrar información, 1 para visualizar una barra de progreso y 2 para ver un registro por época.
Además, las muestras se mezclan de manera aleatoria antes de cada época, gracias a la opción shuffle=True.
Una vez completado el entrenamiento, se realizan predicciones sobre el Precio del Bitcoin utilizando los mejores modelos guardados. Las predicciones generadas, que corresponden al Precio del Bitcoin escalado, son transformadas inversamente para recuperar los valores originales. Asimismo, se evalúa la calidad del ajuste mediante el uso de métricas de medición como SSE, MAPE, MAD, MSD y R2.
import os
import re
from tensorflow.keras.models import load_model
import numpy as np

# Scan the checkpoint directory and load the checkpoint whose filename
# encodes the lowest validation loss.
model_dir = 'keras_models'
files = os.listdir(model_dir)
# Filenames look like: PRSA_data_vola14_LSTM28_weights.<epoch>-<val_loss>.keras
pattern = r"PRSA_data_vola14_LSTM28_weights\.(\d+)-([\d\.]+)\.keras"

best_val_loss = float('inf')
best_model_file = None
best28_lstm_vola14 = None

for file in files:
    match = re.match(pattern, file)
    if not match:
        continue
    epoch = int(match.group(1))            # epoch number (informational)
    val_loss = float(match.group(2))       # val_loss encoded in the name
    if val_loss < best_val_loss:
        best_val_loss, best_model_file = val_loss, file

if best_model_file:
    best_model_path = os.path.join(model_dir, best_model_file)
    print(f"Cargando el mejor modelo: {best_model_file} con val_loss: {best_val_loss}")
    best28_lstm_vola14 = load_model(best_model_path)
    if best28_lstm_vola14 is None:
        print("Error: No se pudo cargar el mejor modelo.")
else:
    print("No se encontraron archivos de modelos que coincidan con el patrón.")
Cargando el mejor modelo: PRSA_data_vola14_LSTM28_weights.17-0.0014.keras con val_loss: 0.0014
A continuación estimamos las predicciones del modelo LSTM para la época en donde la función de pérdida alcanza su valor más bajo.
# Predictions with the best checkpoint (train / validation / test windows).
if best28_lstm_vola14 is not None:
    train_preds_vola14_LSTM28 = best28_lstm_vola14.predict(X_train_vola14_lstm_28)
    val_preds_vola14_LSTM28 = best28_lstm_vola14.predict(X_val_vola14_lstm_28)
    test_preds_vola14_LSTM28 = best28_lstm_vola14.predict(X_test_vola14_lstm_28)
    # Flatten the (n, 1) prediction arrays to 1-D.
    train_preds_vola14_LSTM28 = np.squeeze(train_preds_vola14_LSTM28)
    val_preds_vola14_LSTM28 = np.squeeze(val_preds_vola14_LSTM28)
    # FIX: this line squeezed test_preds_vola7_LSTM28 (a leftover variable from
    # the ω=7 section), overwriting this model's test predictions with stale ones.
    test_preds_vola14_LSTM28 = np.squeeze(test_preds_vola14_LSTM28)
    # Show the resulting predictions.
    print("Predicciones de Entrenamiento:", train_preds_vola14_LSTM28)
    print("Predicciones de validación:", val_preds_vola14_LSTM28)
    print("Predicciones de prueba:", test_preds_vola14_LSTM28)
else:
    print("No se puede realizar predicciones porque el mejor modelo no se ha cargado.")
153/153 [==============================] - 1s 2ms/step
1/1 [==============================] - 0s 9ms/step
1/1 [==============================] - 0s 10ms/step
Predicciones de Entrenamiento: [0.00225073 0.00225073 0.00225073 ... 0.02083081 0.01993592 0.01982495]
Predicciones de validación: [0.01889527 0.02082047 0.02087486 0.02319621 0.0246575 0.02462127
0.02455017 0.02404073 0.02971042 0.02927777 0.0288966 0.0281828
0.03517121 0.03519313 0.03579495 0.03390856 0.03376661 0.0315105
0.03148054 0.03167482 0.03171791 0.03172253 0.0265177 0.02694819
0.02673871 0.02680921 0.02379988 0.02391718]
Predicciones de prueba: [-0.14557588 -0.14251196 -0.1415869 -0.1415354 -0.14236629 -0.14519548
-0.14926851 -0.15327644 -0.15760005 -0.16170609 -0.16421509 -0.16462171
-0.16225874 -0.15851378 -0.15507829 -0.15285218 -0.1508125 -0.14873588
-0.14664638 -0.14565146 -0.14520538 -0.14526379 -0.14600646 -0.14747202
-0.15073383 -0.15503848 -0.15885818 -0.16147268]
plot_model(data_train_plot_vola14_28[-100:], data_val_plot_vola14_28, data_test_plot_vola14_28, val_preds_vola14_LSTM28, test_preds_vola14_LSTM28, "Predicciones usando Memoria a Corto y Largo Paso (LSTM)")
A continuación realizamos un análisis sobre los residuales y rendimiento de nuestro modelo con relación a nuestro conjunto de Entrenamiento(rendimiento) y test (rendimiento y residuales).
Conjunto de datos de Entrenamiento
A continuación se realiza el análisis de los residuales para el conjunto de entrenamiento.
ljung_box_pval_LSTM_train28_vola14, jarque_bera_pval_LSTM_train28_vola14 = diagnostic_plots(y_train_vola14_28, train_preds_vola14_LSTM28)
Ljung-Box LB Statistic: 86.370329
Ljung-Box p-value: 0.000000
Se rechaza H0: hay autocorrelación en los residuales.
Jarque-Bera p-value: 0.000000
Se rechaza H0: los residuales no siguen una distribución normal.
# Compute fit metrics on the training set, label the single row, and
# append the residual-test p-values as extra columns.
metrica_vola14_LSTM_train28 = metricas(y_train_vola14_28, train_preds_vola14_LSTM28)
metrica_vola14_LSTM_train28 = metrica_vola14_LSTM_train28.rename(
    index={0: 'LSTM Entrenamiento Volatilidad ω = 14 y τ = 28'})
metrica_vola14_LSTM_train28['Ljung-Box p-value'] = ljung_box_pval_LSTM_train28_vola14
metrica_vola14_LSTM_train28['Jarque-Bera p-value'] = jarque_bera_pval_LSTM_train28_vola14
metrica_vola14_LSTM_train28
| SSE | MAPE | MAD | MSD | R2 | Ljung-Box p-value | Jarque-Bera p-value | |
|---|---|---|---|---|---|---|---|
| LSTM Entrenamiento Volatilidad ω = 14 y τ = 28 | 1.4339 | 8.84% | 0.0 | 0.0 | 92.63% | 1.4921e-20 | 0.0 |
Conjunto de datos de Prueba:
ljung_box_pvalLSTM28_vola14, jarque_bera_pvalLSTM28_vola14 = evaluate_residuals(data_test_plot_vola14_28, test_preds_vola14_LSTM28)
Se rechaza H0: hay autocorrelación en los residuales.
No se rechaza H0: los residuales siguen una distribución normal.
# Compute fit metrics on the test set, label the single row, and
# append the residual-test p-values as extra columns.
metrica_LSTM_test28_vola14 = metricas(y_test_vola14_28, test_preds_vola14_LSTM28)
metrica_LSTM_test28_vola14 = metrica_LSTM_test28_vola14.rename(
    index={0: 'LSTM Prueba Volatilidad ω = 14 y τ = 28'})
metrica_LSTM_test28_vola14['Ljung-Box p-value'] = ljung_box_pvalLSTM28_vola14
metrica_LSTM_test28_vola14['Jarque-Bera p-value'] = jarque_bera_pvalLSTM28_vola14
metrica_LSTM_test28_vola14
| SSE | MAPE | MAD | MSD | R2 | Ljung-Box p-value | Jarque-Bera p-value | |
|---|---|---|---|---|---|---|---|
| LSTM Prueba Volatilidad ω = 14 y τ = 28 | 0.805 | 958.13% | 0.17 | 0.03 | -449719.48% | 4.6261e-06 | 0.51 |
Curva Runs vs Error/Score :
plot_best_model_validation_loss(history_vola14_LSTM28)
Boxplot Errores Conjunto de Entrenamiento, Validación y Prueba :
errores_plots(y_train_vola14_28, train_preds_vola14_LSTM28, y_val_vola14_28, val_preds_vola14_LSTM28, y_test_vola14_28, test_preds_vola14_LSTM28)
De acuerdo a los resultados obtenidos del gráfico de Run vs Error/Score y al compararlo con el gráfico boxplot de los residuales se observa lo siguiente:
Conjunto de Entrenamiento: La media de los residuales se encuentra considerablemente cerca del error/score correspondiente al val_loss de la época del mejor modelo.
Conjunto de Validación: La media de los residuales se encuentra muy cercana del error/score correspondiente al val_loss de la época del mejor modelo.
Conjunto de Prueba: La media de los residuales se encuentra considerablemente alejada del error/score correspondiente al val_loss de la época del mejor modelo.
Lo anterior es coherente con las métricas obtenidas para el modelo implementado.
Volatilidad ω = 21 (Volatilidad_21): Perceptrones Multicapa (MLP)#
Horizonte de 7 días (\(\tau=7\))#
A continuación, se lleva a cabo la búsqueda de los hiperparámetros que optimizan nuestro modelo utilizando Keras y GridSearch.
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'  # Silence TensorFlow info/warning logs.

def create_mlp_model(activation='tanh', learning_rate=0.001):
    """Build and compile a small MLP (32-16-16 + 0.2 dropout) for a 7-step input window.

    Args:
        activation: activation function for the hidden layers.
        learning_rate: step size for the (legacy) Adam optimizer.

    Returns:
        A compiled keras Model with one linear output unit and MAE loss.
    """
    x = Input(shape=(7,), dtype='float32')
    h = Dense(32, activation=activation)(x)
    h = Dense(16, activation=activation)(h)
    h = Dense(16, activation=activation)(h)
    h = Dropout(0.2)(h)  # regularization before the output layer
    y = Dense(1, activation='linear')(h)
    mlp = Model(inputs=x, outputs=y)
    mlp.compile(loss='mean_absolute_error',
                optimizer=tf.keras.optimizers.legacy.Adam(learning_rate=learning_rate))
    return mlp
# Grid Search setup: wrap the Keras builder in a sklearn-compatible regressor.
model = KerasRegressor(build_fn=create_mlp_model, epochs=20, batch_size=16, verbose=0)
param_grid = {
    'activation': ['tanh'],    # activations to try, e.g. ['relu', 'tanh', 'sigmoid']
    'epochs': [20],            # epoch counts to try, e.g. [20, 50, 100, 200, 300]
    'learning_rate': [0.001],  # learning rates to try, e.g. [0.001, 0.01, 0.1, 0.2, 0.3]
}
grid = GridSearchCV(estimator=model, param_grid=param_grid, n_jobs=-1,
                    cv=KFold(n_splits=5, shuffle=True), verbose=2)

# Run the 5-fold search and report the winning hyperparameters.
grid_result = grid.fit(X_train_vola21_7, y_train_vola21_7)
print(f"Mejor función de activación: {grid_result.best_params_['activation']}")
print(f"Mejor número de epocas: {grid_result.best_params_['epochs']}")
print(f"Mejor Longitud de paso μ (Learning Rate): {grid_result.best_params_['learning_rate']}")
print(f"Mejor Puntuación: {grid_result.best_score_}")
Fitting 5 folds for each of 1 candidates, totalling 5 fits
[CV] END ....activation=tanh, epochs=20, learning_rate=0.001; total time= 4.8s
[CV] END ....activation=tanh, epochs=20, learning_rate=0.001; total time= 4.8s
[CV] END ....activation=tanh, epochs=20, learning_rate=0.001; total time= 4.9s
[CV] END ....activation=tanh, epochs=20, learning_rate=0.001; total time= 5.2s
[CV] END ....activation=tanh, epochs=20, learning_rate=0.001; total time= 5.5s
Mejor función de activación: tanh
Mejor número de epocas: 20
Mejor Longitud de paso μ (Learning Rate): 0.001
Mejor Puntuación: -0.0030908660031855107
A continuación se realiza la indexación de parámetros de la función build_models con base en lo indicado en el numeral 3 del parcial práctico.
# Hyperparameter sweep for the MLP with a 7-day window (τ = 7):
# 4 neuron counts × 4 dropout rates, using the activation selected by the grid search.
input_shape7 = 7
neurons_list = [10, 100, 1000, 10000]
dropout_rates = [0.2, 0.4, 0.6, 0.8]
models_MLP7_vola21 = build_models_mlp(input_shape7, neurons_list, dropout_rates, 'tanh')
Modelo con 10 neuronas y dropout 0.2:
Model: "model_545"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_546 (InputLayer) [(None, 7)] 0
dense_1364 (Dense) (None, 32) 256
dense_1365 (Dense) (None, 16) 528
dense_1366 (Dense) (None, 16) 272
dropout_545 (Dropout) (None, 16) 0
dense_1367 (Dense) (None, 1) 17
=================================================================
Total params: 1,073
Trainable params: 1,073
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10 neuronas y dropout 0.4:
Model: "model_546"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_547 (InputLayer) [(None, 7)] 0
dense_1368 (Dense) (None, 32) 256
dense_1369 (Dense) (None, 16) 528
dense_1370 (Dense) (None, 16) 272
dropout_546 (Dropout) (None, 16) 0
dense_1371 (Dense) (None, 1) 17
=================================================================
Total params: 1,073
Trainable params: 1,073
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10 neuronas y dropout 0.6:
Model: "model_547"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_548 (InputLayer) [(None, 7)] 0
dense_1372 (Dense) (None, 32) 256
dense_1373 (Dense) (None, 16) 528
dense_1374 (Dense) (None, 16) 272
dropout_547 (Dropout) (None, 16) 0
dense_1375 (Dense) (None, 1) 17
=================================================================
Total params: 1,073
Trainable params: 1,073
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10 neuronas y dropout 0.8:
Model: "model_548"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_549 (InputLayer) [(None, 7)] 0
dense_1376 (Dense) (None, 32) 256
dense_1377 (Dense) (None, 16) 528
dense_1378 (Dense) (None, 16) 272
dropout_548 (Dropout) (None, 16) 0
dense_1379 (Dense) (None, 1) 17
=================================================================
Total params: 1,073
Trainable params: 1,073
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 100 neuronas y dropout 0.2:
Model: "model_549"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_550 (InputLayer) [(None, 7)] 0
dense_1380 (Dense) (None, 32) 256
dense_1381 (Dense) (None, 16) 528
dense_1382 (Dense) (None, 16) 272
dropout_549 (Dropout) (None, 16) 0
dense_1383 (Dense) (None, 1) 17
=================================================================
Total params: 1,073
Trainable params: 1,073
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 100 neuronas y dropout 0.4:
Model: "model_550"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_551 (InputLayer) [(None, 7)] 0
dense_1384 (Dense) (None, 32) 256
dense_1385 (Dense) (None, 16) 528
dense_1386 (Dense) (None, 16) 272
dropout_550 (Dropout) (None, 16) 0
dense_1387 (Dense) (None, 1) 17
=================================================================
Total params: 1,073
Trainable params: 1,073
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 100 neuronas y dropout 0.6:
Model: "model_551"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_552 (InputLayer) [(None, 7)] 0
dense_1388 (Dense) (None, 32) 256
dense_1389 (Dense) (None, 16) 528
dense_1390 (Dense) (None, 16) 272
dropout_551 (Dropout) (None, 16) 0
dense_1391 (Dense) (None, 1) 17
=================================================================
Total params: 1,073
Trainable params: 1,073
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 100 neuronas y dropout 0.8:
Model: "model_552"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_553 (InputLayer) [(None, 7)] 0
dense_1392 (Dense) (None, 32) 256
dense_1393 (Dense) (None, 16) 528
dense_1394 (Dense) (None, 16) 272
dropout_552 (Dropout) (None, 16) 0
dense_1395 (Dense) (None, 1) 17
=================================================================
Total params: 1,073
Trainable params: 1,073
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 1000 neuronas y dropout 0.2:
Model: "model_553"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_554 (InputLayer) [(None, 7)] 0
dense_1396 (Dense) (None, 32) 256
dense_1397 (Dense) (None, 16) 528
dense_1398 (Dense) (None, 16) 272
dropout_553 (Dropout) (None, 16) 0
dense_1399 (Dense) (None, 1) 17
=================================================================
Total params: 1,073
Trainable params: 1,073
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 1000 neuronas y dropout 0.4:
Model: "model_554"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_555 (InputLayer) [(None, 7)] 0
dense_1400 (Dense) (None, 32) 256
dense_1401 (Dense) (None, 16) 528
dense_1402 (Dense) (None, 16) 272
dropout_554 (Dropout) (None, 16) 0
dense_1403 (Dense) (None, 1) 17
=================================================================
Total params: 1,073
Trainable params: 1,073
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 1000 neuronas y dropout 0.6:
Model: "model_555"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_556 (InputLayer) [(None, 7)] 0
dense_1404 (Dense) (None, 32) 256
dense_1405 (Dense) (None, 16) 528
dense_1406 (Dense) (None, 16) 272
dropout_555 (Dropout) (None, 16) 0
dense_1407 (Dense) (None, 1) 17
=================================================================
Total params: 1,073
Trainable params: 1,073
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 1000 neuronas y dropout 0.8:
Model: "model_556"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_557 (InputLayer) [(None, 7)] 0
dense_1408 (Dense) (None, 32) 256
dense_1409 (Dense) (None, 16) 528
dense_1410 (Dense) (None, 16) 272
dropout_556 (Dropout) (None, 16) 0
dense_1411 (Dense) (None, 1) 17
=================================================================
Total params: 1,073
Trainable params: 1,073
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10000 neuronas y dropout 0.2:
Model: "model_557"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_558 (InputLayer) [(None, 7)] 0
dense_1412 (Dense) (None, 32) 256
dense_1413 (Dense) (None, 16) 528
dense_1414 (Dense) (None, 16) 272
dropout_557 (Dropout) (None, 16) 0
dense_1415 (Dense) (None, 1) 17
=================================================================
Total params: 1,073
Trainable params: 1,073
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10000 neuronas y dropout 0.4:
Model: "model_558"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_559 (InputLayer) [(None, 7)] 0
dense_1416 (Dense) (None, 32) 256
dense_1417 (Dense) (None, 16) 528
dense_1418 (Dense) (None, 16) 272
dropout_558 (Dropout) (None, 16) 0
dense_1419 (Dense) (None, 1) 17
=================================================================
Total params: 1,073
Trainable params: 1,073
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10000 neuronas y dropout 0.6:
Model: "model_559"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_560 (InputLayer) [(None, 7)] 0
dense_1420 (Dense) (None, 32) 256
dense_1421 (Dense) (None, 16) 528
dense_1422 (Dense) (None, 16) 272
dropout_559 (Dropout) (None, 16) 0
dense_1423 (Dense) (None, 1) 17
=================================================================
Total params: 1,073
Trainable params: 1,073
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10000 neuronas y dropout 0.8:
Model: "model_560"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_561 (InputLayer) [(None, 7)] 0
dense_1424 (Dense) (None, 32) 256
dense_1425 (Dense) (None, 16) 528
dense_1426 (Dense) (None, 16) 272
dropout_560 (Dropout) (None, 16) 0
dense_1427 (Dense) (None, 1) 17
=================================================================
Total params: 1,073
Trainable params: 1,073
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Para entrenar el modelo, se utiliza la función fit() en el objeto del modelo, proporcionándole como argumentos X_train y y_train. El proceso de entrenamiento se lleva a cabo a lo largo de un número específico de épocas. Además, el parámetro batch_size especifica cuántas muestras del conjunto de entrenamiento se usarán en cada iteración de retropropagación (backpropagation).
El conjunto de datos de validación se utiliza para evaluar el rendimiento del modelo al finalizar cada época. Se emplea un objeto ModelCheckpoint que monitorea la función de pérdida en el conjunto de validación y almacena el modelo correspondiente a la época en la que esta función alcanza su valor más bajo.
Se define val_loss como el valor de la función de coste para los datos de validación cruzada y loss como el valor de la función de coste para los datos de entrenamiento. save_freq='epoch' indica que el modelo se evaluará y potencialmente se guardará después de cada época.
from tensorflow.keras.callbacks import ModelCheckpoint

# Checkpoint filename encodes the epoch number and the validation loss of
# each save, so the best run can be recovered later from the name alone.
save_weights = os.path.join(
    'keras_models',
    'PRSA_data_vola21_MLP7_weights.{epoch:02d}-{val_loss:.4f}.keras',
)
# Persist the full model (not just weights) whenever val_loss improves,
# evaluated once per epoch.
save_best7_vola21 = ModelCheckpoint(
    save_weights,
    monitor='val_loss',
    verbose=2,
    save_best_only=True,
    save_weights_only=False,
    mode='min',
    save_freq='epoch',
)
A continuación se itera sobre cada uno de los modelos del objeto models_MLP7.
import os
from joblib import dump, load

# Per-epoch training history (loss / val_loss) for every MLP7 variant.
history_vola21_MPL7 = []

# Train each candidate model once; on later runs, reload the persisted
# history instead of retraining.
for i, model in enumerate(models_MLP7_vola21):
    filename = f'history_vola21_MPL7_model_{i}.joblib'
    if os.path.exists(filename):
        # Previous run already trained this model: reload its history dict.
        model_history = load(filename)
        # Bug fix: the message printed the literal '(unknown)' instead of
        # interpolating the actual filename.
        print(f"El archivo '{filename}' ya existe. Se ha cargado el historial del entrenamiento.")
    else:
        model_history = model.fit(
            x=X_train_vola21_7,
            y=y_train_vola21_7,
            batch_size=16,
            epochs=20,
            verbose=2,
            callbacks=[save_best7_vola21],
            validation_data=(X_val_vola21_7, y_val_vola21_7),
            shuffle=True,
        )
        # Persist only the plain history dict, not the Keras History object.
        dump(model_history.history, filename)
        print(f"El entrenamiento del modelo {i + 1} se ha completado y el historial ha sido guardado en '{filename}'.")
    # Loaded histories are dicts; freshly-trained ones are History objects.
    history_vola21_MPL7.append(model_history if isinstance(model_history, dict) else model_history.history)
El archivo 'history_vola21_MPL7_model_0.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola21_MPL7_model_1.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola21_MPL7_model_2.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola21_MPL7_model_3.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola21_MPL7_model_4.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola21_MPL7_model_5.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola21_MPL7_model_6.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola21_MPL7_model_7.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola21_MPL7_model_8.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola21_MPL7_model_9.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola21_MPL7_model_10.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola21_MPL7_model_11.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola21_MPL7_model_12.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola21_MPL7_model_13.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola21_MPL7_model_14.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola21_MPL7_model_15.joblib' ya existe. Se ha cargado el historial del entrenamiento.
En este caso, el parámetro verbose=2 indica que se mostrará una línea por cada época durante el entrenamiento. Los valores posibles son: 0 para no mostrar información, 1 para visualizar una barra de progreso y 2 para ver un registro por época.
Además, las muestras se mezclan de manera aleatoria antes de cada época, gracias a la opción shuffle=True.
Una vez completado el entrenamiento, se realizan predicciones sobre el Precio del Bitcoin utilizando los mejores modelos guardados. Las predicciones generadas, que corresponden al Precio del Bitcoin escalado, son transformadas inversamente para recuperar los valores originales. Asimismo, se evalúa la calidad del ajuste mediante el uso de métricas de medición como SSE, MAPE, MAD, MSD y R2.
import os
import re
from tensorflow.keras.models import load_model
import numpy as np

# Scan the checkpoint directory for MLP7 weight files and keep the one
# whose filename records the smallest validation loss.
model_dir = 'keras_models'
files = os.listdir(model_dir)
pattern = r"PRSA_data_vola21_MLP7_weights\.(\d+)-([\d\.]+)\.keras"

best_val_loss = float('inf')
best_model_file = None
best_model7_vola21 = None

for file in files:
    match = re.match(pattern, file)
    if not match:
        continue
    # Epoch number and val_loss are both encoded in the checkpoint name.
    epoch = int(match.group(1))
    val_loss = float(match.group(2))
    if val_loss < best_val_loss:
        best_val_loss, best_model_file = val_loss, file

if best_model_file:
    best_model_path = os.path.join(model_dir, best_model_file)
    print(f"Cargando el mejor modelo: {best_model_file} con val_loss: {best_val_loss}")
    best_model7_vola21 = load_model(best_model_path)
    if best_model7_vola21 is None:
        print("Error: No se pudo cargar el mejor modelo.")
else:
    print("No se encontraron archivos de modelos que coincidan con el patrón.")
Cargando el mejor modelo: PRSA_data_vola21_MLP7_weights.20-0.0011.keras con val_loss: 0.0011
A continuación estimamos las predicciones del modelo MLP para la época en donde la función de pérdida alcanza su valor más bajo.
# Predict with the checkpointed best model on all three splits.
if best_model7_vola21 is None:
    print("No se puede realizar predicciones porque el mejor modelo no se ha cargado.")
else:
    # Squeeze drops the trailing singleton output dimension, (n, 1) -> (n,).
    train_preds_vola21_MLP7 = np.squeeze(best_model7_vola21.predict(X_train_vola21_7))
    val_preds_vola21_MLP7 = np.squeeze(best_model7_vola21.predict(X_val_vola21_7))
    test_preds_vola21_MLP7 = np.squeeze(best_model7_vola21.predict(X_test_vola21_7))
    print("Predicciones de Entrenamiento:", train_preds_vola21_MLP7)
    print("Predicciones de validación:", val_preds_vola21_MLP7)
    print("Predicciones de prueba:", test_preds_vola21_MLP7)
156/156 [==============================] - 0s 204us/step
1/1 [==============================] - 0s 9ms/step
1/1 [==============================] - 0s 15ms/step
Predicciones de Entrenamiento: [0.0037861 0.0037861 0.0037861 ... 0.02936227 0.02886306 0.02919588]
Predicciones de validación: [0.02913963 0.03200969 0.03589875 0.03575275 0.03570248 0.03577608
0.03539895]
Predicciones de prueba: [0.03501672 0.03509945 0.0354888 0.03513949 0.03569656 0.03644694
0.03973022]
A continuación se indexan los parámetros sobre la función data_plot() para un horizonte de 7 días (\(\tau = 7\)).
# Split the 21-day volatility series into train/validation/test segments
# for plotting, with a 7-step horizon (τ = 7).
data_train_plot_vola21_7, data_val_plot_vola21_7, data_test_plot_vola21_7 = data_plot(df_1_st['Volatilidad_21'], 7)
Datos de entrenamiento:
4998 0.0000
4997 0.0000
4996 0.0000
4995 0.0000
4994 0.0000
...
18 0.0355
17 0.0354
16 0.0354
15 0.0351
14 0.0351
Name: Volatilidad_21, Length: 4985, dtype: float64
Datos de validación:
13 0.0354
12 0.0358
11 0.0355
10 0.0361
9 0.0368
8 0.0403
7 0.0410
Name: Volatilidad_21, dtype: float64
Datos de prueba:
6 0.0403
5 0.0442
4 0.0441
3 0.0446
2 0.0449
1 0.0448
0 0.0459
Name: Volatilidad_21, dtype: float64
# Plot the last 100 training points together with validation/test actuals
# and the MLP predictions for both splits.
plot_model(data_train_plot_vola21_7[-100:], data_val_plot_vola21_7, data_test_plot_vola21_7, val_preds_vola21_MLP7, test_preds_vola21_MLP7, "Predicciones usando Perceptrón Multicapa (MLP)")
A continuación realizamos un análisis sobre los residuales y rendimiento de nuestro modelo con relación a nuestro conjunto de Entrenamiento(rendimiento) y test (rendimiento y residuales).
Conjunto de datos de Entrenamiento
A continuación se realiza el análisis de los residuales para el conjunto de entrenamiento.
# Residual diagnostics on the training fit: returns the Ljung-Box
# (autocorrelation) and Jarque-Bera (normality) p-values.
ljung_box_pval_MLP_train7_vola21, jarque_bera_pval_MLP_train7_vola21 = diagnostic_plots(y_train_vola21_7, train_preds_vola21_MLP7)
Ljung-Box LB Statistic: 997.042310
Ljung-Box p-value: 0.000000
Se rechaza H0: hay autocorrelación en los residuales.
Jarque-Bera p-value: 0.000000
Se rechaza H0: los residuales no siguen una distribución normal.
# Compute fit metrics for the training split and label its single row.
metrica_vola21_MLP_train = metricas(y_train_vola21_7, train_preds_vola21_MLP7)
metrica_vola21_MLP_train = metrica_vola21_MLP_train.rename(
    index={0: 'MLP Entrenamiento Volatilidad ω = 21 y τ = 7'}
)
# Attach the residual-diagnostic p-values as additional columns.
row_index = metrica_vola21_MLP_train.index
metrica_vola21_MLP_train['Ljung-Box p-value'] = pd.Series([ljung_box_pval_MLP_train7_vola21], index=row_index)
metrica_vola21_MLP_train['Jarque-Bera p-value'] = pd.Series([jarque_bera_pval_MLP_train7_vola21], index=row_index)
metrica_vola21_MLP_train
| SSE | MAPE | MAD | MSD | R2 | Ljung-Box p-value | Jarque-Bera p-value | |
|---|---|---|---|---|---|---|---|
| MLP Entrenamiento Volatilidad ω = 21 y τ = 7 | 1.3412 | 8.34% | 0.0 | 0.0 | 92.87% | 7.8915e-219 | 0.0 |
Conjunto de datos de Prueba:
# Residual checks on the test split: Ljung-Box and Jarque-Bera p-values.
ljung_box_pvalMLP7_vola21, jarque_bera_pvalMLP7_vola21 = evaluate_residuals(data_test_plot_vola21_7, test_preds_vola21_MLP7)
No se rechaza H0: los residuales son independientes (no correlacionados).
No se rechaza H0: los residuales siguen una distribución normal.
# Metrics for the test split, labelled and extended with residual p-values.
metrica_MLP7_test_vola21 = metricas(y_test_vola21_7, test_preds_vola21_MLP7)
metrica_MLP7_test_vola21 = metrica_MLP7_test_vola21.rename(
    index={0: 'MLP Prueba Volatilidad ω = 21 y τ = 7'}
)
row_index = metrica_MLP7_test_vola21.index
metrica_MLP7_test_vola21['Ljung-Box p-value'] = pd.Series([ljung_box_pvalMLP7_vola21], index=row_index)
metrica_MLP7_test_vola21['Jarque-Bera p-value'] = pd.Series([jarque_bera_pvalMLP7_vola21], index=row_index)
metrica_MLP7_test_vola21
| SSE | MAPE | MAD | MSD | R2 | Ljung-Box p-value | Jarque-Bera p-value | |
|---|---|---|---|---|---|---|---|
| MLP Prueba Volatilidad ω = 21 y τ = 7 | 1.9325e-05 | 3.08% | 0.0 | 0.0 | 41.9% | 0.9708 | 0.5649 |
Curva Runs vs Error/Score :
# Loss curves (runs vs. error/score) across all trained MLP7 variants.
plot_best_model_validation_loss(history_vola21_MPL7)
Boxplot Errores Conjunto de Entrenamiento, Validación y Prueba :
# Boxplots of prediction errors for the train, validation and test splits.
errores_plots(y_train_vola21_7, train_preds_vola21_MLP7, y_val_vola21_7, val_preds_vola21_MLP7, y_test_vola21_7, test_preds_vola21_MLP7)
De acuerdo a los resultados obtenidos del gráfico de Run vs Error/Score y al compararlo con el gráfico boxplot de los residuales se observa lo siguiente:
Conjunto de Entrenamiento: La media de los residuales se encuentra muy cercana del error/score correspondiente al val_loss de la época del mejor modelo.
Conjunto de Validación: La media de los residuales se encuentra alejada del error/score correspondiente al val_loss de la época del mejor modelo.
Conjunto de Prueba: La media de los residuales se encuentra cercana del error/score correspondiente al val_loss de la época del mejor modelo.
Lo anterior es coherente con las métricas obtenidas para el modelo implementado.
Horizonte de 14 días (\(\tau=14\))#
A continuación, se lleva a cabo la búsqueda de los hiperparámetros que optimizan nuestro modelo utilizando Keras y GridSearch.
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'  # Silence TensorFlow info/warning logs.


def create_mlp_model(activation='tanh', learning_rate=0.001):
    """Build and compile the 14-input MLP evaluated by the grid search.

    Architecture: 14 -> 32 -> 16 -> 16 -> Dropout(0.2) -> 1 (linear),
    trained with MAE loss and the legacy Adam optimizer.
    """
    input_layer = Input(shape=(14,), dtype='float32')
    hidden = Dense(32, activation=activation)(input_layer)
    hidden = Dense(16, activation=activation)(hidden)
    hidden = Dense(16, activation=activation)(hidden)
    regularized = Dropout(0.2)(hidden)  # Dropout layer for regularization.
    output_layer = Dense(1, activation='linear')(regularized)
    model = Model(inputs=input_layer, outputs=output_layer)
    model.compile(
        loss='mean_absolute_error',
        optimizer=tf.keras.optimizers.legacy.Adam(learning_rate=learning_rate),
    )
    return model


# Grid-search configuration. Only one candidate per parameter is active
# here; the comments show the full ranges that were explored.
model = KerasRegressor(build_fn=create_mlp_model, epochs=20, batch_size=16, verbose=0)
param_grid = {
    'activation': ['tanh'],    # also tried: ['relu', 'tanh', 'sigmoid']
    'epochs': [50],            # also tried: [20, 50, 100, 200, 300]
    'learning_rate': [0.001],  # also tried: [0.001, 0.01, 0.1, 0.2, 0.3]
}
grid = GridSearchCV(
    estimator=model,
    param_grid=param_grid,
    n_jobs=-1,
    cv=KFold(n_splits=5, shuffle=True),
    verbose=2,
)

# Fit the grid search on the τ = 14 training split.
grid_result = grid.fit(X_train_vola21_14, y_train_vola21_14)

# Report the winning hyper-parameters and cross-validation score.
print(f"Mejor función de activación: {grid_result.best_params_['activation']}")
print(f"Mejor número de epocas: {grid_result.best_params_['epochs']}")
print(f"Mejor Longitud de paso μ (Learning Rate): {grid_result.best_params_['learning_rate']}")
print(f"Mejor Puntuación: {grid_result.best_score_}")
Fitting 5 folds for each of 1 candidates, totalling 5 fits
[CV] END ....activation=tanh, epochs=50, learning_rate=0.001; total time= 9.4s
[CV] END ....activation=tanh, epochs=50, learning_rate=0.001; total time= 9.8s
[CV] END ....activation=tanh, epochs=50, learning_rate=0.001; total time= 9.8s
[CV] END ....activation=tanh, epochs=50, learning_rate=0.001; total time= 10.0s
[CV] END ....activation=tanh, epochs=50, learning_rate=0.001; total time= 10.0s
Mejor función de activación: tanh
Mejor número de epocas: 50
Mejor Longitud de paso μ (Learning Rate): 0.001
Mejor Puntuación: -0.004616295639425516
A continuación, configuramos la red MLP empleando la API funcional de Keras. Con este método, una capa puede ser designada como la entrada para la siguiente capa en el momento de su definición.
En este contexto, Input es una función utilizada para establecer una capa de entrada en un modelo de red neuronal. La propiedad shape=(14,) define la estructura de los datos de entrada, lo que indica que estos tendrán 14 dimensiones. Por otro lado, dtype='float32' especifica que los elementos de esta capa serán números de punto flotante de 32 bits
A continuación se define la función build_models para la generación de modelos de acuerdo a los parámetros indexados a través de la lista.
La indexación de parámetros de la función build_models_mlp se realiza con base en lo indicado en el numeral 3 del parcial práctico.
# Hyper-parameter grid per item 3 of the practical exam: every combination
# of hidden-layer size and dropout rate, all with tanh activation and a
# 14-dimensional input (τ = 14).
input_shape14 = 14
neurons_list = [10, 100, 1000, 10000]
dropout_rates = [0.2, 0.4, 0.6, 0.8]
models_MLP14_vola21 = build_models_mlp(input_shape14, neurons_list, dropout_rates, 'tanh')
Modelo con 10 neuronas y dropout 0.2:
Model: "model_562"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_563 (InputLayer) [(None, 14)] 0
dense_1432 (Dense) (None, 32) 480
dense_1433 (Dense) (None, 16) 528
dense_1434 (Dense) (None, 16) 272
dropout_562 (Dropout) (None, 16) 0
dense_1435 (Dense) (None, 1) 17
=================================================================
Total params: 1,297
Trainable params: 1,297
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10 neuronas y dropout 0.4:
Model: "model_563"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_564 (InputLayer) [(None, 14)] 0
dense_1436 (Dense) (None, 32) 480
dense_1437 (Dense) (None, 16) 528
dense_1438 (Dense) (None, 16) 272
dropout_563 (Dropout) (None, 16) 0
dense_1439 (Dense) (None, 1) 17
=================================================================
Total params: 1,297
Trainable params: 1,297
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10 neuronas y dropout 0.6:
Model: "model_564"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_565 (InputLayer) [(None, 14)] 0
dense_1440 (Dense) (None, 32) 480
dense_1441 (Dense) (None, 16) 528
dense_1442 (Dense) (None, 16) 272
dropout_564 (Dropout) (None, 16) 0
dense_1443 (Dense) (None, 1) 17
=================================================================
Total params: 1,297
Trainable params: 1,297
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10 neuronas y dropout 0.8:
Model: "model_565"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_566 (InputLayer) [(None, 14)] 0
dense_1444 (Dense) (None, 32) 480
dense_1445 (Dense) (None, 16) 528
dense_1446 (Dense) (None, 16) 272
dropout_565 (Dropout) (None, 16) 0
dense_1447 (Dense) (None, 1) 17
=================================================================
Total params: 1,297
Trainable params: 1,297
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 100 neuronas y dropout 0.2:
Model: "model_566"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_567 (InputLayer) [(None, 14)] 0
dense_1448 (Dense) (None, 32) 480
dense_1449 (Dense) (None, 16) 528
dense_1450 (Dense) (None, 16) 272
dropout_566 (Dropout) (None, 16) 0
dense_1451 (Dense) (None, 1) 17
=================================================================
Total params: 1,297
Trainable params: 1,297
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 100 neuronas y dropout 0.4:
Model: "model_567"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_568 (InputLayer) [(None, 14)] 0
dense_1452 (Dense) (None, 32) 480
dense_1453 (Dense) (None, 16) 528
dense_1454 (Dense) (None, 16) 272
dropout_567 (Dropout) (None, 16) 0
dense_1455 (Dense) (None, 1) 17
=================================================================
Total params: 1,297
Trainable params: 1,297
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 100 neuronas y dropout 0.6:
Model: "model_568"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_569 (InputLayer) [(None, 14)] 0
dense_1456 (Dense) (None, 32) 480
dense_1457 (Dense) (None, 16) 528
dense_1458 (Dense) (None, 16) 272
dropout_568 (Dropout) (None, 16) 0
dense_1459 (Dense) (None, 1) 17
=================================================================
Total params: 1,297
Trainable params: 1,297
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 100 neuronas y dropout 0.8:
Model: "model_569"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_570 (InputLayer) [(None, 14)] 0
dense_1460 (Dense) (None, 32) 480
dense_1461 (Dense) (None, 16) 528
dense_1462 (Dense) (None, 16) 272
dropout_569 (Dropout) (None, 16) 0
dense_1463 (Dense) (None, 1) 17
=================================================================
Total params: 1,297
Trainable params: 1,297
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 1000 neuronas y dropout 0.2:
Model: "model_570"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_571 (InputLayer) [(None, 14)] 0
dense_1464 (Dense) (None, 32) 480
dense_1465 (Dense) (None, 16) 528
dense_1466 (Dense) (None, 16) 272
dropout_570 (Dropout) (None, 16) 0
dense_1467 (Dense) (None, 1) 17
=================================================================
Total params: 1,297
Trainable params: 1,297
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 1000 neuronas y dropout 0.4:
Model: "model_571"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_572 (InputLayer) [(None, 14)] 0
dense_1468 (Dense) (None, 32) 480
dense_1469 (Dense) (None, 16) 528
dense_1470 (Dense) (None, 16) 272
dropout_571 (Dropout) (None, 16) 0
dense_1471 (Dense) (None, 1) 17
=================================================================
Total params: 1,297
Trainable params: 1,297
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 1000 neuronas y dropout 0.6:
Model: "model_572"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_573 (InputLayer) [(None, 14)] 0
dense_1472 (Dense) (None, 32) 480
dense_1473 (Dense) (None, 16) 528
dense_1474 (Dense) (None, 16) 272
dropout_572 (Dropout) (None, 16) 0
dense_1475 (Dense) (None, 1) 17
=================================================================
Total params: 1,297
Trainable params: 1,297
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 1000 neuronas y dropout 0.8:
Model: "model_573"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_574 (InputLayer) [(None, 14)] 0
dense_1476 (Dense) (None, 32) 480
dense_1477 (Dense) (None, 16) 528
dense_1478 (Dense) (None, 16) 272
dropout_573 (Dropout) (None, 16) 0
dense_1479 (Dense) (None, 1) 17
=================================================================
Total params: 1,297
Trainable params: 1,297
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10000 neuronas y dropout 0.2:
Model: "model_574"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_575 (InputLayer) [(None, 14)] 0
dense_1480 (Dense) (None, 32) 480
dense_1481 (Dense) (None, 16) 528
dense_1482 (Dense) (None, 16) 272
dropout_574 (Dropout) (None, 16) 0
dense_1483 (Dense) (None, 1) 17
=================================================================
Total params: 1,297
Trainable params: 1,297
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10000 neuronas y dropout 0.4:
Model: "model_575"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_576 (InputLayer) [(None, 14)] 0
dense_1484 (Dense) (None, 32) 480
dense_1485 (Dense) (None, 16) 528
dense_1486 (Dense) (None, 16) 272
dropout_575 (Dropout) (None, 16) 0
dense_1487 (Dense) (None, 1) 17
=================================================================
Total params: 1,297
Trainable params: 1,297
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10000 neuronas y dropout 0.6:
Model: "model_576"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_577 (InputLayer) [(None, 14)] 0
dense_1488 (Dense) (None, 32) 480
dense_1489 (Dense) (None, 16) 528
dense_1490 (Dense) (None, 16) 272
dropout_576 (Dropout) (None, 16) 0
dense_1491 (Dense) (None, 1) 17
=================================================================
Total params: 1,297
Trainable params: 1,297
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10000 neuronas y dropout 0.8:
Model: "model_577"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_578 (InputLayer) [(None, 14)] 0
dense_1492 (Dense) (None, 32) 480
dense_1493 (Dense) (None, 16) 528
dense_1494 (Dense) (None, 16) 272
dropout_577 (Dropout) (None, 16) 0
dense_1495 (Dense) (None, 1) 17
=================================================================
Total params: 1,297
Trainable params: 1,297
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Para entrenar el modelo, se utiliza la función fit() en el objeto del modelo, proporcionándole como argumentos X_train y y_train. El proceso de entrenamiento se lleva a cabo a lo largo de un número específico de épocas. Además, el parámetro batch_size especifica cuántas muestras del conjunto de entrenamiento se usarán en cada iteración de retropropagación (backpropagation).
El conjunto de datos de validación se utiliza para evaluar el rendimiento del modelo al finalizar cada época. Se emplea un objeto ModelCheckpoint que monitorea la función de pérdida en el conjunto de validación y almacena el modelo correspondiente a la época en la que esta función alcanza su valor más bajo.
Se define val_loss como el valor de la función de coste para los datos de validación cruzada y loss como el valor de la función de coste para los datos de entrenamiento. period=1 indica que el modelo se evaluará y potencialmente se guardará después de cada época.
from tensorflow.keras.callbacks import ModelCheckpoint

# Checkpoint filename encodes the epoch number and the validation loss of
# each save, so the best run can be recovered later from the name alone.
save_weights = os.path.join(
    'keras_models',
    'PRSA_data_vola21_MLP14_weights.{epoch:02d}-{val_loss:.4f}.keras',
)
# Persist the full model (not just weights) whenever val_loss improves,
# evaluated once per epoch.
save_best14_vola21 = ModelCheckpoint(
    save_weights,
    monitor='val_loss',
    verbose=2,
    save_best_only=True,
    save_weights_only=False,
    mode='min',
    save_freq='epoch',
)
A continuación se itera sobre cada uno de los modelos del objeto models_MLP14.
import os
from joblib import dump, load

# Per-epoch training history (loss / val_loss) for every MLP14 variant.
history_vola21_MPL14 = []

# Train each candidate model once; on later runs, reload the persisted
# history instead of retraining.
for i, model in enumerate(models_MLP14_vola21):
    filename = f'history_vola21_MPL14_model_{i}.joblib'
    if os.path.exists(filename):
        # Previous run already trained this model: reload its history dict.
        model_history = load(filename)
        # Bug fix: the message printed the literal '(unknown)' instead of
        # interpolating the actual filename.
        print(f"El archivo '{filename}' ya existe. Se ha cargado el historial del entrenamiento.")
    else:
        model_history = model.fit(
            x=X_train_vola21_14,
            y=y_train_vola21_14,
            batch_size=16,
            epochs=50,
            verbose=2,
            callbacks=[save_best14_vola21],
            validation_data=(X_val_vola21_14, y_val_vola21_14),
            shuffle=True,
        )
        # Persist only the plain history dict, not the Keras History object.
        dump(model_history.history, filename)
        print(f"El entrenamiento del modelo {i + 1} se ha completado y el historial ha sido guardado en '{filename}'.")
    # Loaded histories are dicts; freshly-trained ones are History objects.
    history_vola21_MPL14.append(model_history if isinstance(model_history, dict) else model_history.history)
El archivo 'history_vola21_MPL14_model_0.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola21_MPL14_model_1.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola21_MPL14_model_2.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola21_MPL14_model_3.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola21_MPL14_model_4.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola21_MPL14_model_5.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola21_MPL14_model_6.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola21_MPL14_model_7.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola21_MPL14_model_8.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola21_MPL14_model_9.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola21_MPL14_model_10.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola21_MPL14_model_11.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola21_MPL14_model_12.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola21_MPL14_model_13.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola21_MPL14_model_14.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola21_MPL14_model_15.joblib' ya existe. Se ha cargado el historial del entrenamiento.
En este caso, el parámetro verbose=2 indica que se mostrará una línea por cada época durante el entrenamiento. Los valores posibles son: 0 para no mostrar información, 1 para visualizar una barra de progreso y 2 para ver un registro por época.
Además, las muestras se mezclan de manera aleatoria antes de cada época, gracias a la opción shuffle=True.
Una vez completado el entrenamiento, se realizan predicciones sobre el Precio del Bitcoin utilizando los mejores modelos guardados. Las predicciones generadas, que corresponden al Precio del Bitcoin escalado, son transformadas inversamente para recuperar los valores originales. Asimismo, se evalúa la calidad del ajuste mediante el uso de métricas de medición como SSE, MAPE, MAD, MSD y R2.
import os
import re
from tensorflow.keras.models import load_model
import numpy as np

# Scan the checkpoint directory for MLP14 weight files and keep the one
# whose filename records the smallest validation loss.
model_dir = 'keras_models'
files = os.listdir(model_dir)
pattern = r"PRSA_data_vola21_MLP14_weights\.(\d+)-([\d\.]+)\.keras"

best_val_loss = float('inf')
best_model_file = None
best_model14_vola21 = None

for file in files:
    match = re.match(pattern, file)
    if not match:
        continue
    # Epoch number and val_loss are both encoded in the checkpoint name.
    epoch = int(match.group(1))
    val_loss = float(match.group(2))
    if val_loss < best_val_loss:
        best_val_loss, best_model_file = val_loss, file

if best_model_file:
    best_model_path = os.path.join(model_dir, best_model_file)
    print(f"Cargando el mejor modelo: {best_model_file} con val_loss: {best_val_loss}")
    best_model14_vola21 = load_model(best_model_path)
    if best_model14_vola21 is None:
        print("Error: No se pudo cargar el mejor modelo.")
else:
    print("No se encontraron archivos de modelos que coincidan con el patrón.")
Cargando el mejor modelo: PRSA_data_vola21_MLP14_weights.31-0.0007.keras con val_loss: 0.0007
A continuación estimamos las predicciones del modelo MLP para la época en donde la función de pérdida alcanza su valor más bajo.
# Generate predictions with the best 14-day-horizon checkpoint, if loaded.
if best_model14_vola21 is None:
    print("No se puede realizar predicciones porque el mejor modelo no se ha cargado.")
else:
    net = best_model14_vola21
    # Run inference on each split and collapse the (n, 1) outputs to (n,).
    train_preds_vola21_MLP14 = np.squeeze(net.predict(X_train_vola21_14))
    val_preds_vola21_MLP14 = np.squeeze(net.predict(X_val_vola21_14))
    test_preds_vola21_MLP14 = np.squeeze(net.predict(X_test_vola21_14))
    # Show the resulting forecasts.
    print("Predicciones de Entrenamiento:", train_preds_vola21_MLP14)
    print("Predicciones de validación:", val_preds_vola21_MLP14)
    print("Predicciones de prueba:", test_preds_vola21_MLP14)
155/155 [==============================] - 0s 222us/step
1/1 [==============================] - 0s 9ms/step
1/1 [==============================] - 0s 8ms/step
Predicciones de Entrenamiento: [0.00096389 0.00096389 0.00096389 ... 0.01847525 0.02015683 0.02022596]
Predicciones de validación: [0.02016557 0.01642422 0.01684735 0.0180444 0.01770096 0.01594362
0.01656384 0.01637932 0.01616096 0.01568308 0.01571247 0.01633131
0.01690862 0.01673477]
Predicciones de prueba: [0.01619704 0.01876767 0.02032613 0.02681474 0.02800784 0.02734597
0.02763236 0.02751197 0.03080346 0.03511794 0.03477101 0.03459641
0.03437426 0.03399778]
A continuación se indexan los parámetros sobre la función data_plot() para un horizonte de 14 días (\(\tau = 14\)).
data_train_plot_vola21_14, data_val_plot_vola21_14, data_test_plot_vola21_14 = data_plot(df_1_st['Volatilidad_21'], 14)
Datos de entrenamiento:
4998 0.0000
4997 0.0000
4996 0.0000
4995 0.0000
4994 0.0000
...
32 0.0158
31 0.0164
30 0.0169
29 0.0168
28 0.0163
Name: Volatilidad_21, Length: 4971, dtype: float64
Datos de validación:
27 0.0188
26 0.0203
25 0.0268
24 0.0280
23 0.0274
22 0.0278
21 0.0278
20 0.0313
19 0.0357
18 0.0355
17 0.0354
16 0.0354
15 0.0351
14 0.0351
Name: Volatilidad_21, dtype: float64
Datos de prueba:
13 0.0354
12 0.0358
11 0.0355
10 0.0361
9 0.0368
8 0.0403
7 0.0410
6 0.0403
5 0.0442
4 0.0441
3 0.0446
2 0.0449
1 0.0448
0 0.0459
Name: Volatilidad_21, dtype: float64
plot_model(data_train_plot_vola21_14[-100:], data_val_plot_vola21_14, data_test_plot_vola21_14, val_preds_vola21_MLP14, test_preds_vola21_MLP14, "Predicciones usando Perceptrón Multicapa (MLP) para un horizonte de 14 días")
A continuación realizamos un análisis sobre los residuales y rendimiento de nuestro modelo con relación a nuestro conjunto de validación(rendimiento) y test (rendimiento y residuales).
Conjunto de datos de Entrenamiento
A continuación se realiza el análisis de los residuales para el conjunto de entrenamiento.
ljung_box_pval_MLP_train14_vola21, jarque_bera_pval_MLP_train14_vola21 = diagnostic_plots(y_train_vola21_14, train_preds_vola21_MLP14)
Ljung-Box LB Statistic: 406.850691
Ljung-Box p-value: 0.000000
Se rechaza H0: hay autocorrelación en los residuales.
Jarque-Bera p-value: 0.000000
Se rechaza H0: los residuales no siguen una distribución normal.
# Compute the fit metrics (SSE/MAPE/MAD/MSD/R2) on the training split,
# label the single row, then append the residual-test p-values as columns.
metrica_vola21_MLP_train14 = metricas(y_train_vola21_14,train_preds_vola21_MLP14)
metrica_vola21_MLP_train14.index = metrica_vola21_MLP_train14.index.map({0: 'MLP Entrenamiento Volatilidad ω = 21 y τ = 14'})
metrica_vola21_MLP_train14['Ljung-Box p-value'] = pd.Series([ljung_box_pval_MLP_train14_vola21], index=metrica_vola21_MLP_train14.index)
metrica_vola21_MLP_train14['Jarque-Bera p-value'] = pd.Series([jarque_bera_pval_MLP_train14_vola21], index=metrica_vola21_MLP_train14.index)
metrica_vola21_MLP_train14
| SSE | MAPE | MAD | MSD | R2 | Ljung-Box p-value | Jarque-Bera p-value | |
|---|---|---|---|---|---|---|---|
| MLP Entrenamiento Volatilidad ω = 21 y τ = 14 | 1.0729 | 6.34% | 0.0 | 0.0 | 94.29% | 1.7769e-90 | 0.0 |
Conjunto de datos de Prueba:
ljung_box_pvalMLP14_vola21, jarque_bera_pvalMLP14_vola21 = evaluate_residuals(data_test_plot_vola21_14, test_preds_vola21_MLP14)
Se rechaza H0: hay autocorrelación en los residuales.
No se rechaza H0: los residuales siguen una distribución normal.
# Same metric table as for training, now on the test split.
metrica_MLP14_test_vola21= metricas(y_test_vola21_14,test_preds_vola21_MLP14)
metrica_MLP14_test_vola21.index = metrica_MLP14_test_vola21.index.map({0: 'MLP Prueba Volatilidad ω = 21 τ = 14'})
metrica_MLP14_test_vola21['Ljung-Box p-value'] = pd.Series([ljung_box_pvalMLP14_vola21], index=metrica_MLP14_test_vola21.index)
metrica_MLP14_test_vola21['Jarque-Bera p-value'] = pd.Series([jarque_bera_pvalMLP14_vola21], index=metrica_MLP14_test_vola21.index)
metrica_MLP14_test_vola21
| SSE | MAPE | MAD | MSD | R2 | Ljung-Box p-value | Jarque-Bera p-value | |
|---|---|---|---|---|---|---|---|
| MLP Prueba Volatilidad ω = 21 τ = 14 | 9.4877e-05 | 6.48% | 0.0 | 0.0 | 77.73% | 0.0428 | 0.4961 |
Curva Runs vs Error/Score :
plot_best_model_validation_loss(history_vola21_MPL14)
Boxplot Errores Conjunto de Entrenamiento, Validación y Prueba :
errores_plots(y_train_vola21_14, train_preds_vola21_MLP14, y_val_vola21_14, val_preds_vola21_MLP14, y_test_vola21_14, test_preds_vola21_MLP14)
De acuerdo a los resultados obtenidos del gráfico de Run vs Error/Score y al compararlo con el gráfico boxplot de los residuales se observa lo siguiente:
Conjunto de Entrenamiento: La media de los residuales se encuentra alejada del error/score correspondiente al val_loss de la época del mejor modelo.
Conjunto de Validación: La media de los residuales se encuentra alejada del error/score correspondiente al val_loss de la época del mejor modelo.
Conjunto de Prueba: La media de los residuales se encuentra alejada del error/score correspondiente al val_loss de la época del mejor modelo.
Lo anterior es coherente con las métricas obtenidas para el modelo implementado. Sin embargo, a pesar de esto, se evidencia que existe autocorrelación en los residuales, lo que hace que nuestro modelo no sea confiable.
Horizonte de 21 días (\(\tau=21\))#
A continuación, se lleva a cabo la búsqueda de los hiperparámetros que optimizan nuestro modelo utilizando Keras y GridSearch.
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'  # Silence TensorFlow info/warning logs.

def create_mlp_model(activation='tanh', learning_rate=0.001):
    """Build and compile the MLP for the 21-day horizon.

    Architecture: 21 inputs -> Dense(32) -> Dense(16) -> Dense(16)
    -> Dropout(0.2) -> linear output; MAE loss with the Adam optimizer.
    """
    inputs = Input(shape=(21,), dtype='float32')
    hidden = Dense(32, activation=activation)(inputs)
    hidden = Dense(16, activation=activation)(hidden)
    hidden = Dense(16, activation=activation)(hidden)
    hidden = Dropout(0.2)(hidden)  # regularization
    outputs = Dense(1, activation='linear')(hidden)
    mlp = Model(inputs=inputs, outputs=outputs)
    mlp.compile(loss='mean_absolute_error',
                optimizer=tf.keras.optimizers.legacy.Adam(learning_rate=learning_rate))
    return mlp
# Wrap the Keras builder so scikit-learn's GridSearchCV can drive it.
model = KerasRegressor(build_fn=create_mlp_model, epochs=20, batch_size=16, verbose=0)

# Hyperparameter grid. Other candidates considered:
# activation: ['relu', 'tanh', 'sigmoid']; epochs: [20, 50, 100, 200, 300];
# learning_rate: [0.001, 0.01, 0.1, 0.2, 0.3]
param_grid = {
    'activation': ['tanh'],
    'epochs': [100],
    'learning_rate': [0.001],
}

cv_splitter = KFold(n_splits=5, shuffle=True)
grid = GridSearchCV(estimator=model, param_grid=param_grid, n_jobs=-1,
                    cv=cv_splitter, verbose=2)

# Run the search on the 21-day-horizon training split.
grid_result = grid.fit(X_train_vola21_21, y_train_vola21_21)

# Report the winning configuration.
print(f"Mejor función de activación: {grid_result.best_params_['activation']}")
print(f"Mejor número de epocas: {grid_result.best_params_['epochs']}")
print(f"Mejor Longitud de paso μ (Learning Rate): {grid_result.best_params_['learning_rate']}")
print(f"Mejor Puntuación: {grid_result.best_score_}")
Fitting 5 folds for each of 1 candidates, totalling 5 fits
[CV] END ...activation=tanh, epochs=100, learning_rate=0.001; total time= 19.4s
[CV] END ...activation=tanh, epochs=100, learning_rate=0.001; total time= 20.1s
[CV] END ...activation=tanh, epochs=100, learning_rate=0.001; total time= 20.1s
[CV] END ...activation=tanh, epochs=100, learning_rate=0.001; total time= 20.3s
[CV] END ...activation=tanh, epochs=100, learning_rate=0.001; total time= 20.4s
Mejor función de activación: tanh
Mejor número de epocas: 100
Mejor Longitud de paso μ (Learning Rate): 0.001
Mejor Puntuación: -0.004021136136725545
A continuación, configuramos la red MLP empleando la API funcional de Keras. Con este método, una capa puede ser designada como la entrada para la siguiente capa en el momento de su definición.
En este contexto, Input es una función utilizada para establecer una capa de entrada en un modelo de red neuronal. La propiedad shape=(21,) define la estructura de los datos de entrada, lo que indica que estos tendrán 21 dimensiones. Por otro lado, dtype='float32' especifica que los elementos de esta capa serán números de punto flotante de 32 bits.
A continuación se define la función build_models_mlp para la generación de modelos de acuerdo a los parámetros indexados a través de la lista.
Indexación de parámetros de la función build_models_mlp con base en lo indicado en el numeral 3 del parcial práctico.
# Grid of architectures to train for the 21-day horizon.
# NOTE(review): the printed summaries below all show 32/16/16 units no matter
# the value in neurons_list — verify build_models_mlp actually uses it.
input_shape21 = 21
neurons_list = [10, 100, 1000, 10000]  # candidate layer sizes
dropout_rates = [0.2, 0.4, 0.6, 0.8]   # candidate dropout probabilities
models_MLP21_vola21 = build_models_mlp(input_shape21, neurons_list, dropout_rates, 'tanh')
Modelo con 10 neuronas y dropout 0.2:
Model: "model_579"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_580 (InputLayer) [(None, 21)] 0
dense_1500 (Dense) (None, 32) 704
dense_1501 (Dense) (None, 16) 528
dense_1502 (Dense) (None, 16) 272
dropout_579 (Dropout) (None, 16) 0
dense_1503 (Dense) (None, 1) 17
=================================================================
Total params: 1,521
Trainable params: 1,521
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10 neuronas y dropout 0.4:
Model: "model_580"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_581 (InputLayer) [(None, 21)] 0
dense_1504 (Dense) (None, 32) 704
dense_1505 (Dense) (None, 16) 528
dense_1506 (Dense) (None, 16) 272
dropout_580 (Dropout) (None, 16) 0
dense_1507 (Dense) (None, 1) 17
=================================================================
Total params: 1,521
Trainable params: 1,521
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10 neuronas y dropout 0.6:
Model: "model_581"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_582 (InputLayer) [(None, 21)] 0
dense_1508 (Dense) (None, 32) 704
dense_1509 (Dense) (None, 16) 528
dense_1510 (Dense) (None, 16) 272
dropout_581 (Dropout) (None, 16) 0
dense_1511 (Dense) (None, 1) 17
=================================================================
Total params: 1,521
Trainable params: 1,521
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10 neuronas y dropout 0.8:
Model: "model_582"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_583 (InputLayer) [(None, 21)] 0
dense_1512 (Dense) (None, 32) 704
dense_1513 (Dense) (None, 16) 528
dense_1514 (Dense) (None, 16) 272
dropout_582 (Dropout) (None, 16) 0
dense_1515 (Dense) (None, 1) 17
=================================================================
Total params: 1,521
Trainable params: 1,521
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 100 neuronas y dropout 0.2:
Model: "model_583"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_584 (InputLayer) [(None, 21)] 0
dense_1516 (Dense) (None, 32) 704
dense_1517 (Dense) (None, 16) 528
dense_1518 (Dense) (None, 16) 272
dropout_583 (Dropout) (None, 16) 0
dense_1519 (Dense) (None, 1) 17
=================================================================
Total params: 1,521
Trainable params: 1,521
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 100 neuronas y dropout 0.4:
Model: "model_584"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_585 (InputLayer) [(None, 21)] 0
dense_1520 (Dense) (None, 32) 704
dense_1521 (Dense) (None, 16) 528
dense_1522 (Dense) (None, 16) 272
dropout_584 (Dropout) (None, 16) 0
dense_1523 (Dense) (None, 1) 17
=================================================================
Total params: 1,521
Trainable params: 1,521
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 100 neuronas y dropout 0.6:
Model: "model_585"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_586 (InputLayer) [(None, 21)] 0
dense_1524 (Dense) (None, 32) 704
dense_1525 (Dense) (None, 16) 528
dense_1526 (Dense) (None, 16) 272
dropout_585 (Dropout) (None, 16) 0
dense_1527 (Dense) (None, 1) 17
=================================================================
Total params: 1,521
Trainable params: 1,521
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 100 neuronas y dropout 0.8:
Model: "model_586"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_587 (InputLayer) [(None, 21)] 0
dense_1528 (Dense) (None, 32) 704
dense_1529 (Dense) (None, 16) 528
dense_1530 (Dense) (None, 16) 272
dropout_586 (Dropout) (None, 16) 0
dense_1531 (Dense) (None, 1) 17
=================================================================
Total params: 1,521
Trainable params: 1,521
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 1000 neuronas y dropout 0.2:
Model: "model_587"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_588 (InputLayer) [(None, 21)] 0
dense_1532 (Dense) (None, 32) 704
dense_1533 (Dense) (None, 16) 528
dense_1534 (Dense) (None, 16) 272
dropout_587 (Dropout) (None, 16) 0
dense_1535 (Dense) (None, 1) 17
=================================================================
Total params: 1,521
Trainable params: 1,521
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 1000 neuronas y dropout 0.4:
Model: "model_588"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_589 (InputLayer) [(None, 21)] 0
dense_1536 (Dense) (None, 32) 704
dense_1537 (Dense) (None, 16) 528
dense_1538 (Dense) (None, 16) 272
dropout_588 (Dropout) (None, 16) 0
dense_1539 (Dense) (None, 1) 17
=================================================================
Total params: 1,521
Trainable params: 1,521
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 1000 neuronas y dropout 0.6:
Model: "model_589"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_590 (InputLayer) [(None, 21)] 0
dense_1540 (Dense) (None, 32) 704
dense_1541 (Dense) (None, 16) 528
dense_1542 (Dense) (None, 16) 272
dropout_589 (Dropout) (None, 16) 0
dense_1543 (Dense) (None, 1) 17
=================================================================
Total params: 1,521
Trainable params: 1,521
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 1000 neuronas y dropout 0.8:
Model: "model_590"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_591 (InputLayer) [(None, 21)] 0
dense_1544 (Dense) (None, 32) 704
dense_1545 (Dense) (None, 16) 528
dense_1546 (Dense) (None, 16) 272
dropout_590 (Dropout) (None, 16) 0
dense_1547 (Dense) (None, 1) 17
=================================================================
Total params: 1,521
Trainable params: 1,521
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10000 neuronas y dropout 0.2:
Model: "model_591"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_592 (InputLayer) [(None, 21)] 0
dense_1548 (Dense) (None, 32) 704
dense_1549 (Dense) (None, 16) 528
dense_1550 (Dense) (None, 16) 272
dropout_591 (Dropout) (None, 16) 0
dense_1551 (Dense) (None, 1) 17
=================================================================
Total params: 1,521
Trainable params: 1,521
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10000 neuronas y dropout 0.4:
Model: "model_592"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_593 (InputLayer) [(None, 21)] 0
dense_1552 (Dense) (None, 32) 704
dense_1553 (Dense) (None, 16) 528
dense_1554 (Dense) (None, 16) 272
dropout_592 (Dropout) (None, 16) 0
dense_1555 (Dense) (None, 1) 17
=================================================================
Total params: 1,521
Trainable params: 1,521
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10000 neuronas y dropout 0.6:
Model: "model_593"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_594 (InputLayer) [(None, 21)] 0
dense_1556 (Dense) (None, 32) 704
dense_1557 (Dense) (None, 16) 528
dense_1558 (Dense) (None, 16) 272
dropout_593 (Dropout) (None, 16) 0
dense_1559 (Dense) (None, 1) 17
=================================================================
Total params: 1,521
Trainable params: 1,521
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10000 neuronas y dropout 0.8:
Model: "model_594"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_595 (InputLayer) [(None, 21)] 0
dense_1560 (Dense) (None, 32) 704
dense_1561 (Dense) (None, 16) 528
dense_1562 (Dense) (None, 16) 272
dropout_594 (Dropout) (None, 16) 0
dense_1563 (Dense) (None, 1) 17
=================================================================
Total params: 1,521
Trainable params: 1,521
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Para entrenar el modelo, se utiliza la función fit() en el objeto del modelo, proporcionándole como argumentos X_train y y_train. El proceso de entrenamiento se lleva a cabo a lo largo de un número específico de épocas. Además, el parámetro batch_size especifica cuántas muestras del conjunto de entrenamiento se usarán en cada iteración de retropropagación (backpropagation).
El conjunto de datos de validación se utiliza para evaluar el rendimiento del modelo al finalizar cada época. Se emplea un objeto ModelCheckpoint que monitorea la función de pérdida en el conjunto de validación y almacena el modelo correspondiente a la época en la que esta función alcanza su valor más bajo.
Se define val_loss como el valor de la función de coste para los datos de validación cruzada y loss como el valor de la función de coste para los datos de entrenamiento. period=1 indica que el modelo se evaluará y potencialmente se guardará después de cada época.
from tensorflow.keras.callbacks import ModelCheckpoint

# Checkpoint path template: epoch number and validation loss are embedded in
# the filename so the best epoch can be identified later from disk.
save_weights = os.path.join('keras_models',
                            'PRSA_data_vola21_MLP21_weights.{epoch:02d}-{val_loss:.4f}.keras')
# Save only when val_loss improves; keep the full model, evaluated each epoch.
save_best21_vola21 = ModelCheckpoint(save_weights,
                                     monitor='val_loss',
                                     verbose=2,
                                     save_best_only=True,
                                     save_weights_only=False,
                                     mode='min',
                                     save_freq='epoch')
A continuación se itera sobre cada uno de los modelos del objeto models_MLP21.
import os
from joblib import dump, load

# Train each candidate MLP for the 21-day horizon, or reload a cached
# training history from disk when one already exists.
history_vola21_MPL21 = []

# Iterate over every model built in models_MLP21_vola21.
for i, model in enumerate(models_MLP21_vola21):
    filename = f'history_vola21_MPL21_model_{i}.joblib'
    if os.path.exists(filename):
        # A saved history exists: skip retraining and load it.
        model_history = load(filename)
        # BUGFIX: the message printed the literal '(unknown)' instead of the
        # actual filename; interpolate {filename} as the logged output shows.
        print(f"El archivo '{filename}' ya existe. Se ha cargado el historial del entrenamiento.")
    else:
        # Train from scratch; ModelCheckpoint persists the best epoch.
        model_history = model.fit(x=X_train_vola21_21, y=y_train_vola21_21, batch_size=16, epochs=100,
                                  verbose=2, callbacks=[save_best21_vola21],
                                  validation_data=(X_val_vola21_21, y_val_vola21_21),
                                  shuffle=True)
        dump(model_history.history, filename)
        print(f"El entrenamiento del modelo {i + 1} se ha completado y el historial ha sido guardado en '{filename}'.")
    # Store a plain dict whether it came from disk (dict) or fit() (History).
    history_vola21_MPL21.append(model_history if isinstance(model_history, dict) else model_history.history)
El archivo 'history_vola21_MPL21_model_0.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola21_MPL21_model_1.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola21_MPL21_model_2.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola21_MPL21_model_3.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola21_MPL21_model_4.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola21_MPL21_model_5.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola21_MPL21_model_6.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola21_MPL21_model_7.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola21_MPL21_model_8.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola21_MPL21_model_9.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola21_MPL21_model_10.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola21_MPL21_model_11.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola21_MPL21_model_12.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola21_MPL21_model_13.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola21_MPL21_model_14.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola21_MPL21_model_15.joblib' ya existe. Se ha cargado el historial del entrenamiento.
En este caso, el parámetro verbose=2 indica que se mostrará una línea por cada época durante el entrenamiento. Los valores posibles son: 0 para no mostrar información, 1 para visualizar una barra de progreso y 2 para ver un registro por época.
Además, las muestras se mezclan de manera aleatoria antes de cada época, gracias a la opción shuffle=True.
Una vez completado el entrenamiento, se realizan predicciones sobre el Precio del Bitcoin utilizando los mejores modelos guardados. Asimismo, se evalúa la calidad del ajuste mediante el uso de métricas de medición como SSE, MAPE, MAD, MSD y R2.
import os
import re
from tensorflow.keras.models import load_model
import numpy as np

# Directory of saved checkpoints for the 21-day horizon.
model_dir = 'keras_models'
files = os.listdir(model_dir)
# Filenames carry the epoch and the validation loss of each checkpoint.
pattern = r"PRSA_data_vola21_MLP21_weights\.(\d+)-([\d\.]+)\.keras"

best_val_loss = float('inf')
best_model_file = None
best_model21_vola21 = None

# Keep the checkpoint whose encoded validation loss is the smallest.
for fname in files:
    hit = re.match(pattern, fname)
    if not hit:
        continue
    epoch = int(hit.group(1))      # epoch parsed from the filename
    val_loss = float(hit.group(2)) # val_loss parsed from the filename
    if val_loss < best_val_loss:
        best_val_loss = val_loss
        best_model_file = fname

# Load the best checkpoint when one was found.
if best_model_file:
    best_model_path = os.path.join(model_dir, best_model_file)
    print(f"Cargando el mejor modelo: {best_model_file} con val_loss: {best_val_loss}")
    best_model21_vola21 = load_model(best_model_path)
    if best_model21_vola21 is None:
        print("Error: No se pudo cargar el mejor modelo.")
else:
    print("No se encontraron archivos de modelos que coincidan con el patrón.")
Cargando el mejor modelo: PRSA_data_vola21_MLP21_weights.33-0.0009.keras con val_loss: 0.0009
A continuación estimamos las predicciones del modelo MLP para la época en donde la función de pérdida alcanza su valor más bajo.
# Generate predictions with the best 21-day-horizon checkpoint, if loaded.
if best_model21_vola21 is not None:
    net = best_model21_vola21
    # Run inference on each split and flatten the (n, 1) outputs to (n,).
    train_preds_vola21_MLP21 = np.squeeze(net.predict(X_train_vola21_21))
    val_preds_vola21_MLP21 = np.squeeze(net.predict(X_val_vola21_21))
    test_preds_vola21_MLP21 = np.squeeze(net.predict(X_test_vola21_21))
    # Show the resulting forecasts.
    print('Predicciones de entrenamiento', train_preds_vola21_MLP21)
    print("Predicciones de validación:", val_preds_vola21_MLP21)
    print("Predicciones de prueba:", test_preds_vola21_MLP21)
else:
    print("No se puede realizar predicciones porque el mejor modelo no se ha cargado.")
154/154 [==============================] - 0s 200us/step
1/1 [==============================] - 0s 9ms/step
1/1 [==============================] - 0s 9ms/step
Predicciones de entrenamiento [0.00090695 0.00090695 0.00090695 ... 0.03070456 0.03062424 0.03070573]
Predicciones de validación: [0.0308798 0.03066699 0.03026728 0.02884573 0.02775048 0.0297657
0.03009115 0.0300177 0.02637498 0.02595604 0.02599108 0.02605651
0.02037753 0.01999774 0.01952229 0.0191514 0.01901711 0.01976177
0.01881058 0.02030978 0.02031429]
Predicciones de prueba: [0.02008242 0.01652398 0.01668522 0.01795449 0.01781894 0.01621883
0.01652392 0.01663592 0.01646791 0.01594731 0.01566346 0.01654514
0.01716333 0.01691436 0.01634654 0.01892112 0.02054337 0.02705515
0.02855585 0.0279751 0.0280706 ]
A continuación se indexan los parámetros sobre la función data_plot() para un horizonte de 21 días (\(\tau = 21\)).
data_train_plot_vola21_21, data_val_plot_vola21_21, data_test_plot_vola21_21 = data_plot(df_1_st['Volatilidad_21'], 21)
Datos de entrenamiento:
4998 0.0000
4997 0.0000
4996 0.0000
4995 0.0000
4994 0.0000
...
46 0.0202
45 0.0188
44 0.0204
43 0.0204
42 0.0203
Name: Volatilidad_21, Length: 4957, dtype: float64
Datos de validación:
41 0.0166
40 0.0170
39 0.0182
38 0.0180
37 0.0162
36 0.0168
35 0.0166
34 0.0163
33 0.0159
32 0.0158
31 0.0164
30 0.0169
29 0.0168
28 0.0163
27 0.0188
26 0.0203
25 0.0268
24 0.0280
23 0.0274
22 0.0278
21 0.0278
Name: Volatilidad_21, dtype: float64
Datos de prueba:
20 0.0313
19 0.0357
18 0.0355
17 0.0354
16 0.0354
15 0.0351
14 0.0351
13 0.0354
12 0.0358
11 0.0355
10 0.0361
9 0.0368
8 0.0403
7 0.0410
6 0.0403
5 0.0442
4 0.0441
3 0.0446
2 0.0449
1 0.0448
0 0.0459
Name: Volatilidad_21, dtype: float64
plot_model(data_train_plot_vola21_21[-100:], data_val_plot_vola21_21, data_test_plot_vola21_21, val_preds_vola21_MLP21, test_preds_vola21_MLP21, "Predicciones usando Perceptrón Multicapa (MLP) para un horizonte de 21 días")
A continuación realizamos un análisis sobre los residuales y rendimiento de nuestro modelo con relación a nuestro conjunto de validación(rendimiento) y test (rendimiento y residuales).
Conjunto de datos de Entrenamiento
A continuación se realiza el análisis de los residuales para el conjunto de entrenamiento.
ljung_box_pval_MLP_train21_vola21, jarque_bera_pval_MLP_train21_vola21 = diagnostic_plots(y_train_vola21_21, train_preds_vola21_MLP21)
Ljung-Box LB Statistic: 466.515492
Ljung-Box p-value: 0.000000
Se rechaza H0: hay autocorrelación en los residuales.
Jarque-Bera p-value: 0.000000
Se rechaza H0: los residuales no siguen una distribución normal.
# Training-set fit metrics for the MLP (omega = 21, tau = 21), annotated with
# the residual-test p-values computed above.
metrica_vola21_MLP_train21 = metricas(y_train_vola21_21, train_preds_vola21_MLP21).rename(
    index={0: 'MLP Entrenamiento Volatilidad ω = 21 y τ = 21'}
)
# Single-row frame: a scalar assignment broadcasts to the only row.
metrica_vola21_MLP_train21['Ljung-Box p-value'] = ljung_box_pval_MLP_train21_vola21
metrica_vola21_MLP_train21['Jarque-Bera p-value'] = jarque_bera_pval_MLP_train21_vola21
metrica_vola21_MLP_train21
| SSE | MAPE | MAD | MSD | R2 | Ljung-Box p-value | Jarque-Bera p-value | |
|---|---|---|---|---|---|---|---|
| MLP Entrenamiento Volatilidad ω = 21 y τ = 21 | 1.0415 | 5.61% | 0.0 | 0.0 | 94.45% | 1.8367e-103 | 0.0 |
Conjunto de datos de Prueba:
ljung_box_pvalMLP21_vola21, jarque_bera_pvalMLP21_vola21 = evaluate_residuals(data_test_plot_vola21_21, test_preds_vola21_MLP21)
Se rechaza H0: hay autocorrelación en los residuales.
No se rechaza H0: los residuales siguen una distribución normal.
# Test-set metrics for the MLP (omega = 21, tau = 21), annotated with the
# residual-test p-values from evaluate_residuals.
metrica_MLP21_test_vola21 = metricas(y_test_vola21_21, test_preds_vola21_MLP21).rename(
    index={0: 'MLP Prueba Volatilidad ω = 21 y τ = 21'}
)
# Single-row frame: scalar assignment broadcasts to the only row.
metrica_MLP21_test_vola21['Ljung-Box p-value'] = ljung_box_pvalMLP21_vola21
metrica_MLP21_test_vola21['Jarque-Bera p-value'] = jarque_bera_pvalMLP21_vola21
metrica_MLP21_test_vola21
| SSE | MAPE | MAD | MSD | R2 | Ljung-Box p-value | Jarque-Bera p-value | |
|---|---|---|---|---|---|---|---|
| MLP Prueba Volatilidad ω = 21 y τ = 21 | 6.8749e-05 | 5.6% | 0.0 | 0.0 | 84.5% | 0.0087 | 0.792 |
Curva Runs vs Error/Score :
plot_best_model_validation_loss(history_vola21_MPL21)
Boxplot Errores Conjunto de Entrenamiento, Validación y Prueba :
errores_plots(y_train_vola21_21, train_preds_vola21_MLP21, y_val_vola21_21, val_preds_vola21_MLP21, y_test_vola21_21, test_preds_vola21_MLP21)
De acuerdo con los resultados obtenidos del gráfico de Run vs Error/Score y al compararlo con el gráfico boxplot de los residuales se observa lo siguiente:
Conjunto de Entrenamiento: La media de los residuales se encuentra alejada del error/score correspondiente al val_loss de la época del mejor modelo.
Conjunto de Validación: La media de los residuales se encuentra alejada considerablemente del error/score correspondiente al val_loss de la época del mejor modelo.
Conjunto de Prueba: La media de los residuales se encuentra alejada del error/score correspondiente al val_loss de la época del mejor modelo. De igual forma se observa una alta variabilidad en los residuales.
Lo anterior es coherente con las métricas obtenidas para el modelo implementado.
Horizonte de 28 días (\(\tau=28\))#
A continuación, se lleva a cabo la búsqueda de los hiperparámetros que optimizan nuestro modelo utilizando Keras y GridSearch.
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # Esto ignorará todos los logs de info y warnings.
def create_mlp_model(activation='tanh', learning_rate=0.001):
    """Build and compile a small MLP regressor for 28-step inputs.

    Architecture: Dense 32 -> 16 -> 16 (all with the given activation),
    Dropout(0.2) for regularization, then a single linear output unit.
    Compiled with MAE loss and the legacy Adam optimizer.

    Args:
        activation: activation function for the hidden Dense layers.
        learning_rate: step size for the Adam optimizer.

    Returns:
        A compiled keras Model.
    """
    inputs = Input(shape=(28,), dtype='float32')  # 28-day input window
    x = inputs
    for units in (32, 16, 16):
        x = Dense(units, activation=activation)(x)
    x = Dropout(0.2)(x)  # regularization before the output head
    outputs = Dense(1, activation='linear')(x)
    mlp = Model(inputs=inputs, outputs=outputs)
    mlp.compile(loss='mean_absolute_error',
                optimizer=tf.keras.optimizers.legacy.Adam(learning_rate=learning_rate))
    return mlp
# Grid Search configuration: wrap the Keras builder so sklearn can cross-validate it.
# NOTE(review): `build_fn` is deprecated in newer scikeras/keras wrappers (use `model=`) — confirm installed version.
model = KerasRegressor(build_fn=create_mlp_model, epochs=20, batch_size=16, verbose=0)
# Hyperparameter grid; the commented-out lists are the full search spaces tried earlier.
param_grid = {
'activation': ['relu'], # activations to try; full set: ['relu', 'tanh', 'sigmoid']
'epochs' : [50], # epochs to try; full set: [20, 50, 100, 200, 300]
'learning_rate' : [0.001] # learning rates to try; full set: [0.001, 0.01, 0.1, 0.2, 0.3]
}
# 5-fold shuffled CV over the grid, using all cores.
grid = GridSearchCV(estimator=model, param_grid=param_grid, n_jobs=-1, cv=KFold(n_splits=5, shuffle=True), verbose =2)
# Fit the grid search on the 28-step training set.
grid_result = grid.fit(X_train_vola21_28, y_train_vola21_28)
# Report the winning hyperparameters and CV score.
print(f"Mejor función de activación: {grid_result.best_params_['activation']}")
print(f"Mejor número de epocas: {grid_result.best_params_['epochs']}")
print(f"Mejor Longitud de paso μ (Learning Rate): {grid_result.best_params_['learning_rate']}")
print(f"Mejor Puntuación: {grid_result.best_score_}")
Fitting 5 folds for each of 1 candidates, totalling 5 fits
[CV] END ....activation=relu, epochs=50, learning_rate=0.001; total time= 8.6s
[CV] END ....activation=relu, epochs=50, learning_rate=0.001; total time= 8.7s
[CV] END ....activation=relu, epochs=50, learning_rate=0.001; total time= 8.7s
[CV] END ....activation=relu, epochs=50, learning_rate=0.001; total time= 8.7s
[CV] END ....activation=relu, epochs=50, learning_rate=0.001; total time= 9.0s
Mejor función de activación: relu
Mejor número de epocas: 50
Mejor Longitud de paso μ (Learning Rate): 0.001
Mejor Puntuación: -0.004218885255977511
A continuación, configuramos la red MLP empleando la API funcional de Keras. Con este método, una capa puede ser designada como la entrada para la siguiente capa en el momento de su definición.
En este contexto, Input es una función utilizada para establecer una capa de entrada en un modelo de red neuronal. La propiedad shape=(28,) define la estructura de los datos de entrada, lo que indica que estos tendrán 28 dimensiones. Por otro lado, dtype='float32' especifica que los elementos de esta capa serán números de punto flotante de 32 bits.
A continuación se define la función build_models para la generación de modelos de acuerdo con los parámetros indexados a través de la lista.
Indexación de parámetros de la función build_models_mlp con base en lo indicado en el numeral 3 del parcial práctico.
# Parameters for build_models_mlp: 28-step input window, candidate neuron counts
# and dropout rates, ReLU activation.
# NOTE(review): the printed summaries below all show the same 32-16-16 architecture
# (1,745 params) regardless of neurons/dropout — verify build_models_mlp actually
# uses these lists.
input_shape28 = 28
neurons_list = [10, 100, 1000, 10000]
dropout_rates = [0.2, 0.4, 0.6, 0.8]
models_MLP28_vola21 = build_models_mlp(input_shape28, neurons_list, dropout_rates, 'relu')
Modelo con 10 neuronas y dropout 0.2:
Model: "model_596"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_597 (InputLayer) [(None, 28)] 0
dense_1568 (Dense) (None, 32) 928
dense_1569 (Dense) (None, 16) 528
dense_1570 (Dense) (None, 16) 272
dropout_596 (Dropout) (None, 16) 0
dense_1571 (Dense) (None, 1) 17
=================================================================
Total params: 1,745
Trainable params: 1,745
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10 neuronas y dropout 0.4:
Model: "model_597"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_598 (InputLayer) [(None, 28)] 0
dense_1572 (Dense) (None, 32) 928
dense_1573 (Dense) (None, 16) 528
dense_1574 (Dense) (None, 16) 272
dropout_597 (Dropout) (None, 16) 0
dense_1575 (Dense) (None, 1) 17
=================================================================
Total params: 1,745
Trainable params: 1,745
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10 neuronas y dropout 0.6:
Model: "model_598"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_599 (InputLayer) [(None, 28)] 0
dense_1576 (Dense) (None, 32) 928
dense_1577 (Dense) (None, 16) 528
dense_1578 (Dense) (None, 16) 272
dropout_598 (Dropout) (None, 16) 0
dense_1579 (Dense) (None, 1) 17
=================================================================
Total params: 1,745
Trainable params: 1,745
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10 neuronas y dropout 0.8:
Model: "model_599"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_600 (InputLayer) [(None, 28)] 0
dense_1580 (Dense) (None, 32) 928
dense_1581 (Dense) (None, 16) 528
dense_1582 (Dense) (None, 16) 272
dropout_599 (Dropout) (None, 16) 0
dense_1583 (Dense) (None, 1) 17
=================================================================
Total params: 1,745
Trainable params: 1,745
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 100 neuronas y dropout 0.2:
Model: "model_600"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_601 (InputLayer) [(None, 28)] 0
dense_1584 (Dense) (None, 32) 928
dense_1585 (Dense) (None, 16) 528
dense_1586 (Dense) (None, 16) 272
dropout_600 (Dropout) (None, 16) 0
dense_1587 (Dense) (None, 1) 17
=================================================================
Total params: 1,745
Trainable params: 1,745
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 100 neuronas y dropout 0.4:
Model: "model_601"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_602 (InputLayer) [(None, 28)] 0
dense_1588 (Dense) (None, 32) 928
dense_1589 (Dense) (None, 16) 528
dense_1590 (Dense) (None, 16) 272
dropout_601 (Dropout) (None, 16) 0
dense_1591 (Dense) (None, 1) 17
=================================================================
Total params: 1,745
Trainable params: 1,745
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 100 neuronas y dropout 0.6:
Model: "model_602"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_603 (InputLayer) [(None, 28)] 0
dense_1592 (Dense) (None, 32) 928
dense_1593 (Dense) (None, 16) 528
dense_1594 (Dense) (None, 16) 272
dropout_602 (Dropout) (None, 16) 0
dense_1595 (Dense) (None, 1) 17
=================================================================
Total params: 1,745
Trainable params: 1,745
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 100 neuronas y dropout 0.8:
Model: "model_603"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_604 (InputLayer) [(None, 28)] 0
dense_1596 (Dense) (None, 32) 928
dense_1597 (Dense) (None, 16) 528
dense_1598 (Dense) (None, 16) 272
dropout_603 (Dropout) (None, 16) 0
dense_1599 (Dense) (None, 1) 17
=================================================================
Total params: 1,745
Trainable params: 1,745
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 1000 neuronas y dropout 0.2:
Model: "model_604"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_605 (InputLayer) [(None, 28)] 0
dense_1600 (Dense) (None, 32) 928
dense_1601 (Dense) (None, 16) 528
dense_1602 (Dense) (None, 16) 272
dropout_604 (Dropout) (None, 16) 0
dense_1603 (Dense) (None, 1) 17
=================================================================
Total params: 1,745
Trainable params: 1,745
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 1000 neuronas y dropout 0.4:
Model: "model_605"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_606 (InputLayer) [(None, 28)] 0
dense_1604 (Dense) (None, 32) 928
dense_1605 (Dense) (None, 16) 528
dense_1606 (Dense) (None, 16) 272
dropout_605 (Dropout) (None, 16) 0
dense_1607 (Dense) (None, 1) 17
=================================================================
Total params: 1,745
Trainable params: 1,745
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 1000 neuronas y dropout 0.6:
Model: "model_606"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_607 (InputLayer) [(None, 28)] 0
dense_1608 (Dense) (None, 32) 928
dense_1609 (Dense) (None, 16) 528
dense_1610 (Dense) (None, 16) 272
dropout_606 (Dropout) (None, 16) 0
dense_1611 (Dense) (None, 1) 17
=================================================================
Total params: 1,745
Trainable params: 1,745
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 1000 neuronas y dropout 0.8:
Model: "model_607"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_608 (InputLayer) [(None, 28)] 0
dense_1612 (Dense) (None, 32) 928
dense_1613 (Dense) (None, 16) 528
dense_1614 (Dense) (None, 16) 272
dropout_607 (Dropout) (None, 16) 0
dense_1615 (Dense) (None, 1) 17
=================================================================
Total params: 1,745
Trainable params: 1,745
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10000 neuronas y dropout 0.2:
Model: "model_608"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_609 (InputLayer) [(None, 28)] 0
dense_1616 (Dense) (None, 32) 928
dense_1617 (Dense) (None, 16) 528
dense_1618 (Dense) (None, 16) 272
dropout_608 (Dropout) (None, 16) 0
dense_1619 (Dense) (None, 1) 17
=================================================================
Total params: 1,745
Trainable params: 1,745
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10000 neuronas y dropout 0.4:
Model: "model_609"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_610 (InputLayer) [(None, 28)] 0
dense_1620 (Dense) (None, 32) 928
dense_1621 (Dense) (None, 16) 528
dense_1622 (Dense) (None, 16) 272
dropout_609 (Dropout) (None, 16) 0
dense_1623 (Dense) (None, 1) 17
=================================================================
Total params: 1,745
Trainable params: 1,745
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10000 neuronas y dropout 0.6:
Model: "model_610"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_611 (InputLayer) [(None, 28)] 0
dense_1624 (Dense) (None, 32) 928
dense_1625 (Dense) (None, 16) 528
dense_1626 (Dense) (None, 16) 272
dropout_610 (Dropout) (None, 16) 0
dense_1627 (Dense) (None, 1) 17
=================================================================
Total params: 1,745
Trainable params: 1,745
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10000 neuronas y dropout 0.8:
Model: "model_611"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_612 (InputLayer) [(None, 28)] 0
dense_1628 (Dense) (None, 32) 928
dense_1629 (Dense) (None, 16) 528
dense_1630 (Dense) (None, 16) 272
dropout_611 (Dropout) (None, 16) 0
dense_1631 (Dense) (None, 1) 17
=================================================================
Total params: 1,745
Trainable params: 1,745
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Para entrenar el modelo, se utiliza la función fit() en el objeto del modelo, proporcionándole como argumentos X_train y y_train. El proceso de entrenamiento se lleva a cabo a lo largo de un número específico de épocas. Además, el parámetro batch_size especifica cuántas muestras del conjunto de entrenamiento se usarán en cada iteración de retropropagación (backpropagation).
El conjunto de datos de validación se utiliza para evaluar el rendimiento del modelo al finalizar cada época. Se emplea un objeto ModelCheckpoint que monitorea la función de pérdida en el conjunto de validación y almacena el modelo correspondiente a la época en la que esta función alcanza su valor más bajo.
Se define val_loss como el valor de la función de coste para los datos de validación cruzada y loss como el valor de la función de coste para los datos de entrenamiento. period=1 indica que el modelo se evaluará y potencialmente se guardará después de cada época.
from tensorflow.keras.callbacks import ModelCheckpoint
# Checkpoint path template: epoch number and validation loss are embedded in the filename.
save_weights = os.path.join('keras_models', 'PRSA_data_vola21_MLP28_weights.{epoch:02d}-{val_loss:.4f}.keras')
# Save the full model (not just weights) each time val_loss improves, checked once per epoch.
save_best28_vola21 = ModelCheckpoint(save_weights, monitor='val_loss', verbose=2,
save_best_only=True, save_weights_only=False, mode='min', save_freq='epoch');
A continuación se itera sobre cada uno de los modelos del objeto models_MLP28.
import os
from joblib import dump, load

# One training history (dict) per model, in the same order as models_MLP28_vola21.
history_vola21_MPL28 = []

# Train each model, or reload its cached history if a previous run saved it.
for i, model in enumerate(models_MLP28_vola21):
    filename = f'history_vola21_MPL28_model_{i}.joblib'
    if os.path.exists(filename):
        # Reuse the cached training history instead of re-fitting.
        model_history = load(filename)
        # BUG FIX: the messages printed the literal '(unknown)' instead of the
        # actual filename; interpolate `filename` as the logged output shows.
        print(f"El archivo '{filename}' ya existe. Se ha cargado el historial del entrenamiento.")
    else:
        model_history = model.fit(x=X_train_vola21_28, y=y_train_vola21_28, batch_size=16, epochs=50,  # epochs chosen by the grid search
                                  verbose=2, callbacks=[save_best28_vola21], validation_data=(X_val_vola21_28, y_val_vola21_28),
                                  shuffle=True)
        dump(model_history.history, filename)
        print(f"El entrenamiento del modelo {i + 1} se ha completado y el historial ha sido guardado en '{filename}'.")
    # Normalize: cached entries are plain dicts, fresh fits return History objects.
    history_vola21_MPL28.append(model_history if isinstance(model_history, dict) else model_history.history)
El archivo 'history_vola21_MPL28_model_0.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola21_MPL28_model_1.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola21_MPL28_model_2.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola21_MPL28_model_3.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola21_MPL28_model_4.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola21_MPL28_model_5.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola21_MPL28_model_6.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola21_MPL28_model_7.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola21_MPL28_model_8.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola21_MPL28_model_9.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola21_MPL28_model_10.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola21_MPL28_model_11.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola21_MPL28_model_12.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola21_MPL28_model_13.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola21_MPL28_model_14.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola21_MPL28_model_15.joblib' ya existe. Se ha cargado el historial del entrenamiento.
En este caso, el parámetro verbose=2 indica que se mostrará una línea por cada época durante el entrenamiento. Los valores posibles son: 0 para no mostrar información, 1 para visualizar una barra de progreso y 2 para ver un registro por época.
Además, las muestras se mezclan de manera aleatoria antes de cada época, gracias a la opción shuffle=True.
Una vez completado el entrenamiento, se realizan predicciones sobre la volatilidad del Precio del Bitcoin utilizando los mejores modelos guardados. Asimismo, se evalúa la calidad del ajuste mediante el uso de métricas de medición como SSE, MAPE, MAD, MSD y R2.
import os
import re
from tensorflow.keras.models import load_model
import numpy as np

# Scan the checkpoint directory and pick the file whose encoded val_loss is lowest.
model_dir = 'keras_models'
files = os.listdir(model_dir)
# Filenames look like PRSA_data_vola21_MLP28_weights.<epoch>-<val_loss>.keras
pattern = r"PRSA_data_vola21_MLP28_weights\.(\d+)-([\d\.]+)\.keras"
best_val_loss = float('inf')
best_model_file = None
best_model28_vola21 = None

for file in files:
    match = re.match(pattern, file)
    if match is None:
        continue
    # group(1) holds the epoch; only the val_loss (group 2) drives the selection.
    candidate_loss = float(match.group(2))
    if candidate_loss < best_val_loss:
        best_val_loss, best_model_file = candidate_loss, file

# Load the winner, if any checkpoint matched the pattern.
if best_model_file:
    best_model_path = os.path.join(model_dir, best_model_file)
    print(f"Cargando el mejor modelo: {best_model_file} con val_loss: {best_val_loss}")
    best_model28_vola21 = load_model(best_model_path)
    if best_model28_vola21 is None:
        print("Error: No se pudo cargar el mejor modelo.")
else:
    print("No se encontraron archivos de modelos que coincidan con el patrón.")
Cargando el mejor modelo: PRSA_data_vola21_MLP28_weights.47-0.0008.keras con val_loss: 0.0008
A continuación estimamos las predicciones del modelo MLP para la época en donde la función de pérdida alcanza su valor más bajo.
# Predictions using the best checkpointed model (tau = 28).
if best_model28_vola21 is not None:
    train_preds_vola21_MLP28 = best_model28_vola21.predict(X_train_vola21_28)
    val_preds_vola21_MLP28 = best_model28_vola21.predict(X_val_vola21_28)
    test_preds_vola21_MLP28 = best_model28_vola21.predict(X_test_vola21_28)
    # Drop the trailing singleton dimension: (n, 1) -> (n,).
    train_preds_vola21_MLP28 = np.squeeze(train_preds_vola21_MLP28)
    val_preds_vola21_MLP28 = np.squeeze(val_preds_vola21_MLP28)
    # BUG FIX: this line previously squeezed `test_preds_vola14_MLP28` (a
    # variable from the omega = 14 section), overwriting the vola21 test
    # predictions with another model's output.
    test_preds_vola21_MLP28 = np.squeeze(test_preds_vola21_MLP28)
    # Show the resulting prediction arrays.
    print("Predicciones de entrenamiento:", train_preds_vola21_MLP28)
    print("Predicciones de validación:", val_preds_vola21_MLP28)
    print("Predicciones de prueba:", test_preds_vola21_MLP28)
else:
    print("No se puede realizar predicciones porque el mejor modelo no se ha cargado.")
153/153 [==============================] - 0s 327us/step
1/1 [==============================] - 0s 33ms/step
1/1 [==============================] - 0s 76ms/step
Predicciones de entrenamiento: [0.00618984 0.00618984 0.00618984 ... 0.02314616 0.02246777 0.02216766]
Predicciones de validación: [0.02194713 0.02160701 0.02167887 0.02268315 0.02324307 0.02273287
0.02262795 0.02239757 0.02500234 0.02553649 0.02544373 0.02526701
0.02988758 0.03026226 0.03128419 0.03118323 0.0307458 0.02994825
0.03047525 0.0306689 0.03068856 0.03085267 0.03064897 0.03013232
0.02906432 0.02873153 0.03001262 0.03004884]
Predicciones de prueba: [0.02341563 0.02329951 0.0239257 0.0232718 0.02181141 0.02139834
0.02170686 0.02298849 0.0185006 0.01725134 0.01746814 0.01890099
0.01789921 0.01819181 0.01704365 0.01761634 0.01743313 0.01868746
0.01865299 0.01883747 0.0188517 0.01915296 0.01872328 0.01836095
0.01840936 0.01957742 0.01822976 0.01814106]
A continuación se indexan los parámetros sobre la función data_plot() para un horizonte de 28 días (\(\tau = 28\)).
data_train_plot_vola21_28, data_val_plot_vola21_28 , data_test_plot_vola21_28 = data_plot(df_1_st['Volatilidad_21'], 28)
Datos de entrenamiento:
4998 0.0000
4997 0.0000
4996 0.0000
4995 0.0000
4994 0.0000
...
60 0.0290
59 0.0280
58 0.0301
57 0.0302
56 0.0302
Name: Volatilidad_21, Length: 4943, dtype: float64
Datos de validación:
55 0.0266
54 0.0265
53 0.0263
52 0.0265
51 0.0206
50 0.0207
49 0.0199
48 0.0195
47 0.0194
46 0.0202
45 0.0188
44 0.0204
43 0.0204
42 0.0203
41 0.0166
40 0.0170
39 0.0182
38 0.0180
37 0.0162
36 0.0168
35 0.0166
34 0.0163
33 0.0159
32 0.0158
31 0.0164
30 0.0169
29 0.0168
28 0.0163
Name: Volatilidad_21, dtype: float64
Datos de prueba:
27 0.0188
26 0.0203
25 0.0268
24 0.0280
23 0.0274
22 0.0278
21 0.0278
20 0.0313
19 0.0357
18 0.0355
17 0.0354
16 0.0354
15 0.0351
14 0.0351
13 0.0354
12 0.0358
11 0.0355
10 0.0361
9 0.0368
8 0.0403
7 0.0410
6 0.0403
5 0.0442
4 0.0441
3 0.0446
2 0.0449
1 0.0448
0 0.0459
Name: Volatilidad_21, dtype: float64
plot_model(data_train_plot_vola21_28[-100:], data_val_plot_vola21_28, data_test_plot_vola21_28, val_preds_vola21_MLP28, test_preds_vola21_MLP28, "Predicciones usando Perceptrón Multicapa (MLP) para un horizonte de 28 días")
A continuación realizamos un análisis sobre los residuales y rendimiento de nuestro modelo con relación a nuestro conjunto de validación(rendimiento) y test (rendimiento y residuales).
Conjunto de datos de Entrenamiento
A continuación se realiza el análisis de los residuales para el conjunto de entrenamiento.
ljung_box_pval_MLP_train28_vola21, jarque_bera_pval_MLP_train28_vola21 = diagnostic_plots(y_train_vola21_28, train_preds_vola21_MLP28)
Ljung-Box LB Statistic: 7.843928
Ljung-Box p-value: 0.005099
Se rechaza H0: hay autocorrelación en los residuales.
Jarque-Bera p-value: 0.000000
Se rechaza H0: los residuales no siguen una distribución normal.
# Training-set fit metrics for the MLP (omega = 21, tau = 28), annotated with
# the residual-test p-values computed above.
metrica_vola21_MLP_train28 = metricas(y_train_vola21_28, train_preds_vola21_MLP28).rename(
    index={0: 'MLP Entrenamiento Volatilidad ω = 21 y τ = 28'}
)
# Single-row frame: a scalar assignment broadcasts to the only row.
metrica_vola21_MLP_train28['Ljung-Box p-value'] = ljung_box_pval_MLP_train28_vola21
metrica_vola21_MLP_train28['Jarque-Bera p-value'] = jarque_bera_pval_MLP_train28_vola21
metrica_vola21_MLP_train28
| SSE | MAPE | MAD | MSD | R2 | Ljung-Box p-value | Jarque-Bera p-value | |
|---|---|---|---|---|---|---|---|
| MLP Entrenamiento Volatilidad ω = 21 y τ = 28 | 0.7178 | 7.63% | 0.0 | 0.0 | 96.17% | 0.0051 | 0.0 |
Conjunto de datos de Prueba:
ljung_box_pvalMLP28_vola21, jarque_bera_pvalMLP28_vola21 = evaluate_residuals(data_test_plot_vola21_28, test_preds_vola21_MLP28)
Se rechaza H0: hay autocorrelación en los residuales.
No se rechaza H0: los residuales siguen una distribución normal.
# Test-set metrics for the MLP (omega = 21, tau = 28), annotated with the
# residual-test p-values from evaluate_residuals.
metrica_MLP28_test_vola21 = metricas(y_test_vola21_28, test_preds_vola21_MLP28).rename(
    index={0: 'MLP Prueba Volatilidad ω = 21 y τ = 28'}
)
# Single-row frame: scalar assignment broadcasts to the only row.
metrica_MLP28_test_vola21['Ljung-Box p-value'] = ljung_box_pvalMLP28_vola21
metrica_MLP28_test_vola21['Jarque-Bera p-value'] = jarque_bera_pvalMLP28_vola21
metrica_MLP28_test_vola21
| SSE | MAPE | MAD | MSD | R2 | Ljung-Box p-value | Jarque-Bera p-value | |
|---|---|---|---|---|---|---|---|
| MLP Prueba Volatilidad ω = 21 y τ = 28 | 0.0001 | 10.46% | 0.0 | 0.0 | 57.38% | 3.2944e-06 | 0.2533 |
Curva Runs vs Error/Score :
plot_best_model_validation_loss(history_vola21_MPL28)
Boxplot Errores Conjunto de Entrenamiento, Validación y Prueba :
errores_plots(y_train_vola21_28, train_preds_vola21_MLP28, y_val_vola21_28, val_preds_vola21_MLP28, y_test_vola21_28, test_preds_vola21_MLP28)
De acuerdo con los resultados obtenidos del gráfico de Run vs Error/Score y al compararlo con el gráfico boxplot de los residuales se observa lo siguiente:
Conjunto de Entrenamiento: La media de los residuales se encuentra alejada del error/score correspondiente al val_loss de la época del mejor modelo.
Conjunto de Validación: La media de los residuales se encuentra alejada del error/score correspondiente al val_loss de la época del mejor modelo.
Conjunto de Prueba: La media de los residuales se encuentra alejada del error/score correspondiente al val_loss de la época del mejor modelo. De igual forma se observa una alta variabilidad en los residuales.
Lo anterior es coherente con las métricas obtenidas para el modelo implementado.
Volatilidad ω = 21 (Volatilidad_21): Memoria a Corto y Largo Plazo (LSTM) #
Ya definimos los regresores (X) y la variable objetivo (y) para el proceso de entrenamiento y validación en la sección correspondiente al modelo Perceptrones Multicapa a través de la función create_time_series_datasets(); sin embargo, ésta se utiliza para generar arreglos 2D de forma (número de muestras, número de pasos de tiempo). Dado que la entrada a las capas de una RNN debe ser de forma: número de muestras, número de pasos de tiempo, número de características por paso de tiempo; procedemos con la definición de la función change_dimension_lstm() para realizar la transformación de 2D a 3D.
Horizonte de 7 días (\(\tau=7\))#
Indexación de parámetros para cambio de dimensión de X de 2D a 3D.
X_train_vola21_lstm_7, X_val_vola21_lstm_7, X_test_vola21_lstm_7 = change_dimension_lstm(X_train_vola21_7, X_val_vola21_7, X_test_vola21_7)
Shape of 3D arrays X: (4971, 7, 1) (7, 7, 1) (7, 7, 1)
A continuación, se lleva a cabo la búsqueda de los hiperparámetros que optimizan nuestro modelo utilizando Keras y GridSearch.
from sklearn.metrics import make_scorer, mean_absolute_error
# Suppress TensorFlow C++ info/warning logs (effective only before TF is imported).
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # This ignores all info and warning logs.
# Define the LSTM network architecture
def create_lstm_model(optimizer, activation):
    """Build and compile a stacked-LSTM regressor for (7, 1) inputs.

    Architecture: LSTM(64, return_sequences) -> LSTM(32) -> Dropout(0.2)
    -> single linear output unit. Compiled with MAE loss.

    NOTE(review): the `activation` argument is accepted (and grid-searched)
    but never applied to any layer — the LSTMs keep their Keras defaults and
    the output layer is hard-coded to 'linear'. Confirm whether it should be
    wired into the model.
    """
    input_layer_lstm = Input(shape=(7,1), dtype='float32')
    lstm_layer1= LSTM(64, return_sequences=True)(input_layer_lstm)
    lstm_layer2 = LSTM(32, return_sequences=False)(lstm_layer1)
    dropout_layer_lstm = Dropout(0.2)(lstm_layer2)  # regularization before the head
    output_layer_lstm = Dense(1, activation='linear')(dropout_layer_lstm)
    ts_model_lstm = Model(inputs=input_layer_lstm, outputs=output_layer_lstm)
    ts_model_lstm.compile(loss='mean_absolute_error', optimizer=optimizer)
    return ts_model_lstm
# Hyperparameter grid; the commented values show the full search space that
# was trimmed to a single candidate to keep the run time manageable.
param_grid = {'activation': ['relu'],  # full space: ['relu', 'tanh', 'sigmoid']
              'epochs': [20],          # full space: [20, 50, 100, 150]
              'optimizer': ['SGD']     # full space: ['SGD', 'RMSprop', 'Adam']
              }
# Grid Search configuration.
# MAE is an error metric (lower is better): greater_is_better=False is
# required, otherwise GridSearchCV would MAXIMIZE the MAE and select the
# worst-performing candidate.
scoring = make_scorer(mean_absolute_error, greater_is_better=False)
model = KerasRegressor(build_fn=create_lstm_model, epochs=20, batch_size=16, verbose=0)
# NOTE(review): KFold(shuffle=True) mixes past and future windows; for a time
# series, TimeSeriesSplit would be the leakage-free choice — confirm intent.
grid = GridSearchCV(estimator=model, param_grid=param_grid, cv=KFold(n_splits=5, shuffle=True),
                    scoring=scoring, n_jobs=-1, verbose=2)
grid_result = grid.fit(X_train_vola21_lstm_7, y_train_vola21_7)
# Grid Search results (best_score_ is now negated MAE because of the scorer sign).
print(f"Mejor función de activación: {grid_result.best_params_['activation']}")
print(f"Mejor número de epocas: {grid_result.best_params_['epochs']}")
print(f"Mejor Longitud de paso μ (optimizer): {grid_result.best_params_['optimizer']}")
print(f"Mejor Puntuación: {grid_result.best_score_}")
Fitting 5 folds for each of 1 candidates, totalling 5 fits
[CV] END ..........activation=relu, epochs=20, optimizer=SGD; total time= 21.7s
[CV] END ..........activation=relu, epochs=20, optimizer=SGD; total time= 22.2s
[CV] END ..........activation=relu, epochs=20, optimizer=SGD; total time= 22.2s
[CV] END ..........activation=relu, epochs=20, optimizer=SGD; total time= 22.3s
[CV] END ..........activation=relu, epochs=20, optimizer=SGD; total time= 22.5s
Mejor función de activación: relu
Mejor número de epocas: 20
Mejor Longitud de paso μ (optimizer): SGD
Mejor Puntuación: 0.021119742557332934
El modelo de predicción de series temporales se basa en una red neuronal recurrente que incluye una capa de entrada que se conecta a una capa LSTM. Esta capa LSTM considera 7 pasos temporales, equivalentes al número de datos históricos. La salida de la LSTM se genera únicamente a partir del último paso temporal. Cada uno de los n pasos temporales definidos en el shape (n,) cuenta con 32 neuronas ocultas.
La cantidad de pasos temporales (timesteps) y 1 indica el número de características (features) por cada paso temporal. Se utiliza return_sequences=True cuando es necesario enviar la secuencia completa de salidas a la capa siguiente, que puede ser otra capa recurrente, para este caso otra capa LSTM.
La salida de la LSTM pasa a una capa de exclusión (Dropout) que elimina aleatoriamente una fracción de la entrada (20%, 40%, 60% u 80%, según el modelo) antes de pasar a la capa de salida, que tiene una única neurona con una función de activación lineal.
La indexación de parámetros de la función build_models_lstm con base en lo indicado en el numeral 3 del parcial práctico.
# Hyperparameter combinations for build_models_lstm: a 7-step window plus
# 4 neuron counts x 4 dropout rates = 16 candidate models, trained with SGD.
# NOTE(review): the printed summaries below are identical for every neuron
# count (always 64/32-unit LSTMs) — build_models_lstm may ignore
# neurons_list; confirm against its definition.
input_shape7 = 7
neurons_list = [10, 100, 1000, 10000]
dropout_rates = [0.2, 0.4, 0.6, 0.8]
models_LSTM7_vola21_7 = build_models_lstm(input_shape7, neurons_list, dropout_rates ,'SGD')
Model: "model_613"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_614 (InputLayer) [(None, 7, 1)] 0
lstm_546 (LSTM) (None, 7, 64) 16896
lstm_547 (LSTM) (None, 32) 12416
dropout_613 (Dropout) (None, 32) 0
dense_1633 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10 neuronas y dropout 0.2:
Model: "model_613"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_614 (InputLayer) [(None, 7, 1)] 0
lstm_546 (LSTM) (None, 7, 64) 16896
lstm_547 (LSTM) (None, 32) 12416
dropout_613 (Dropout) (None, 32) 0
dense_1633 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_614"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_615 (InputLayer) [(None, 7, 1)] 0
lstm_548 (LSTM) (None, 7, 64) 16896
lstm_549 (LSTM) (None, 32) 12416
dropout_614 (Dropout) (None, 32) 0
dense_1634 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10 neuronas y dropout 0.4:
Model: "model_614"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_615 (InputLayer) [(None, 7, 1)] 0
lstm_548 (LSTM) (None, 7, 64) 16896
lstm_549 (LSTM) (None, 32) 12416
dropout_614 (Dropout) (None, 32) 0
dense_1634 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_615"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_616 (InputLayer) [(None, 7, 1)] 0
lstm_550 (LSTM) (None, 7, 64) 16896
lstm_551 (LSTM) (None, 32) 12416
dropout_615 (Dropout) (None, 32) 0
dense_1635 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10 neuronas y dropout 0.6:
Model: "model_615"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_616 (InputLayer) [(None, 7, 1)] 0
lstm_550 (LSTM) (None, 7, 64) 16896
lstm_551 (LSTM) (None, 32) 12416
dropout_615 (Dropout) (None, 32) 0
dense_1635 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_616"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_617 (InputLayer) [(None, 7, 1)] 0
lstm_552 (LSTM) (None, 7, 64) 16896
lstm_553 (LSTM) (None, 32) 12416
dropout_616 (Dropout) (None, 32) 0
dense_1636 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10 neuronas y dropout 0.8:
Model: "model_616"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_617 (InputLayer) [(None, 7, 1)] 0
lstm_552 (LSTM) (None, 7, 64) 16896
lstm_553 (LSTM) (None, 32) 12416
dropout_616 (Dropout) (None, 32) 0
dense_1636 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_617"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_618 (InputLayer) [(None, 7, 1)] 0
lstm_554 (LSTM) (None, 7, 64) 16896
lstm_555 (LSTM) (None, 32) 12416
dropout_617 (Dropout) (None, 32) 0
dense_1637 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 100 neuronas y dropout 0.2:
Model: "model_617"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_618 (InputLayer) [(None, 7, 1)] 0
lstm_554 (LSTM) (None, 7, 64) 16896
lstm_555 (LSTM) (None, 32) 12416
dropout_617 (Dropout) (None, 32) 0
dense_1637 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_618"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_619 (InputLayer) [(None, 7, 1)] 0
lstm_556 (LSTM) (None, 7, 64) 16896
lstm_557 (LSTM) (None, 32) 12416
dropout_618 (Dropout) (None, 32) 0
dense_1638 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 100 neuronas y dropout 0.4:
Model: "model_618"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_619 (InputLayer) [(None, 7, 1)] 0
lstm_556 (LSTM) (None, 7, 64) 16896
lstm_557 (LSTM) (None, 32) 12416
dropout_618 (Dropout) (None, 32) 0
dense_1638 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_619"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_620 (InputLayer) [(None, 7, 1)] 0
lstm_558 (LSTM) (None, 7, 64) 16896
lstm_559 (LSTM) (None, 32) 12416
dropout_619 (Dropout) (None, 32) 0
dense_1639 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 100 neuronas y dropout 0.6:
Model: "model_619"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_620 (InputLayer) [(None, 7, 1)] 0
lstm_558 (LSTM) (None, 7, 64) 16896
lstm_559 (LSTM) (None, 32) 12416
dropout_619 (Dropout) (None, 32) 0
dense_1639 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_620"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_621 (InputLayer) [(None, 7, 1)] 0
lstm_560 (LSTM) (None, 7, 64) 16896
lstm_561 (LSTM) (None, 32) 12416
dropout_620 (Dropout) (None, 32) 0
dense_1640 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 100 neuronas y dropout 0.8:
Model: "model_620"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_621 (InputLayer) [(None, 7, 1)] 0
lstm_560 (LSTM) (None, 7, 64) 16896
lstm_561 (LSTM) (None, 32) 12416
dropout_620 (Dropout) (None, 32) 0
dense_1640 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_621"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_622 (InputLayer) [(None, 7, 1)] 0
lstm_562 (LSTM) (None, 7, 64) 16896
lstm_563 (LSTM) (None, 32) 12416
dropout_621 (Dropout) (None, 32) 0
dense_1641 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 1000 neuronas y dropout 0.2:
Model: "model_621"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_622 (InputLayer) [(None, 7, 1)] 0
lstm_562 (LSTM) (None, 7, 64) 16896
lstm_563 (LSTM) (None, 32) 12416
dropout_621 (Dropout) (None, 32) 0
dense_1641 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_622"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_623 (InputLayer) [(None, 7, 1)] 0
lstm_564 (LSTM) (None, 7, 64) 16896
lstm_565 (LSTM) (None, 32) 12416
dropout_622 (Dropout) (None, 32) 0
dense_1642 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 1000 neuronas y dropout 0.4:
Model: "model_622"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_623 (InputLayer) [(None, 7, 1)] 0
lstm_564 (LSTM) (None, 7, 64) 16896
lstm_565 (LSTM) (None, 32) 12416
dropout_622 (Dropout) (None, 32) 0
dense_1642 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_623"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_624 (InputLayer) [(None, 7, 1)] 0
lstm_566 (LSTM) (None, 7, 64) 16896
lstm_567 (LSTM) (None, 32) 12416
dropout_623 (Dropout) (None, 32) 0
dense_1643 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 1000 neuronas y dropout 0.6:
Model: "model_623"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_624 (InputLayer) [(None, 7, 1)] 0
lstm_566 (LSTM) (None, 7, 64) 16896
lstm_567 (LSTM) (None, 32) 12416
dropout_623 (Dropout) (None, 32) 0
dense_1643 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_624"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_625 (InputLayer) [(None, 7, 1)] 0
lstm_568 (LSTM) (None, 7, 64) 16896
lstm_569 (LSTM) (None, 32) 12416
dropout_624 (Dropout) (None, 32) 0
dense_1644 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 1000 neuronas y dropout 0.8:
Model: "model_624"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_625 (InputLayer) [(None, 7, 1)] 0
lstm_568 (LSTM) (None, 7, 64) 16896
lstm_569 (LSTM) (None, 32) 12416
dropout_624 (Dropout) (None, 32) 0
dense_1644 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_625"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_626 (InputLayer) [(None, 7, 1)] 0
lstm_570 (LSTM) (None, 7, 64) 16896
lstm_571 (LSTM) (None, 32) 12416
dropout_625 (Dropout) (None, 32) 0
dense_1645 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10000 neuronas y dropout 0.2:
Model: "model_625"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_626 (InputLayer) [(None, 7, 1)] 0
lstm_570 (LSTM) (None, 7, 64) 16896
lstm_571 (LSTM) (None, 32) 12416
dropout_625 (Dropout) (None, 32) 0
dense_1645 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_626"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_627 (InputLayer) [(None, 7, 1)] 0
lstm_572 (LSTM) (None, 7, 64) 16896
lstm_573 (LSTM) (None, 32) 12416
dropout_626 (Dropout) (None, 32) 0
dense_1646 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10000 neuronas y dropout 0.4:
Model: "model_626"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_627 (InputLayer) [(None, 7, 1)] 0
lstm_572 (LSTM) (None, 7, 64) 16896
lstm_573 (LSTM) (None, 32) 12416
dropout_626 (Dropout) (None, 32) 0
dense_1646 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_627"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_628 (InputLayer) [(None, 7, 1)] 0
lstm_574 (LSTM) (None, 7, 64) 16896
lstm_575 (LSTM) (None, 32) 12416
dropout_627 (Dropout) (None, 32) 0
dense_1647 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10000 neuronas y dropout 0.6:
Model: "model_627"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_628 (InputLayer) [(None, 7, 1)] 0
lstm_574 (LSTM) (None, 7, 64) 16896
lstm_575 (LSTM) (None, 32) 12416
dropout_627 (Dropout) (None, 32) 0
dense_1647 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_628"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_629 (InputLayer) [(None, 7, 1)] 0
lstm_576 (LSTM) (None, 7, 64) 16896
lstm_577 (LSTM) (None, 32) 12416
dropout_628 (Dropout) (None, 32) 0
dense_1648 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10000 neuronas y dropout 0.8:
Model: "model_628"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_629 (InputLayer) [(None, 7, 1)] 0
lstm_576 (LSTM) (None, 7, 64) 16896
lstm_577 (LSTM) (None, 32) 12416
dropout_628 (Dropout) (None, 32) 0
dense_1648 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Para entrenar el modelo, se utiliza la función fit() en el objeto del modelo, proporcionándole como argumentos X_train y y_train. El proceso de entrenamiento se lleva a cabo a lo largo de un número específico de épocas. Además, el parámetro batch_size especifica cuántas muestras del conjunto de entrenamiento se usarán en cada iteración de retropropagación (backpropagation).
El conjunto de datos de validación se utiliza para evaluar el rendimiento del modelo al finalizar cada época. Se emplea un objeto ModelCheckpoint que monitorea la función de pérdida en el conjunto de validación y almacena el modelo correspondiente a la época en la que esta función alcanza su valor más bajo.
Se define val_loss como el valor de la función de coste para los datos de validación cruzada y loss como el valor de la función de coste para los datos de entrenamiento. period=1 indica que el modelo se evaluará y potencialmente se guardará después de cada época.
from tensorflow.keras.callbacks import ModelCheckpoint

# Ensure the checkpoint directory exists up front so the first save cannot
# fail, regardless of the TF/Keras version's directory-creation behavior.
os.makedirs('keras_models', exist_ok=True)

# The checkpoint filename encodes the epoch and its validation loss so the
# best epoch can later be recovered by parsing the filenames.
save_weights = os.path.join('keras_models', 'PRSA_data_vola21_LSTM7_weights.{epoch:02d}-{val_loss:.4f}.keras')
# Save only when val_loss improves on the best value seen so far (mode='min'),
# evaluated once per epoch; the full model is saved, not just the weights.
save_best7_lstm_vola21 = ModelCheckpoint(save_weights, monitor='val_loss', verbose=2,
                                         save_best_only=True, save_weights_only=False,
                                         mode='min', save_freq='epoch')
A continuación se itera sobre cada uno de los modelos del objeto models_LSTM7.
import os
from joblib import dump, load

# Train (or reload) each of the 16 candidate LSTMs and collect its history.
history_vola21_LSTM7 = []
for i, model in enumerate(models_LSTM7_vola21_7):
    filename = f'history_vola21_LSTM7_model_{i}.joblib'
    if os.path.exists(filename):
        # A persisted history exists: skip retraining and reuse it.
        model_history = load(filename)
        # Bug fix: the message printed the literal '(unknown)' instead of
        # interpolating the history filename.
        print(f"El archivo '{filename}' ya existe. Se ha cargado el historial del entrenamiento.")
    else:
        model_history = model.fit(x=X_train_vola21_lstm_7, y=y_train_vola21_7, batch_size=16, epochs=20,
                                  verbose=2, callbacks=[save_best7_lstm_vola21],
                                  validation_data=(X_val_vola21_lstm_7, y_val_vola21_7),
                                  shuffle=True)
        # Persist only the history dict (the History object itself is not
        # reliably picklable across sessions).
        dump(model_history.history, filename)
        print(f"El entrenamiento del modelo {i + 1} se ha completado y el historial ha sido guardado en '{filename}'.")
    # Normalize: reloaded histories are plain dicts, fresh fits return
    # History objects whose data lives in ``.history``.
    history_vola21_LSTM7.append(model_history if isinstance(model_history, dict) else model_history.history)
El archivo 'history_vola21_LSTM7_model_0.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola21_LSTM7_model_1.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola21_LSTM7_model_2.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola21_LSTM7_model_3.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola21_LSTM7_model_4.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola21_LSTM7_model_5.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola21_LSTM7_model_6.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola21_LSTM7_model_7.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola21_LSTM7_model_8.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola21_LSTM7_model_9.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola21_LSTM7_model_10.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola21_LSTM7_model_11.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola21_LSTM7_model_12.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola21_LSTM7_model_13.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola21_LSTM7_model_14.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola21_LSTM7_model_15.joblib' ya existe. Se ha cargado el historial del entrenamiento.
En este caso, el parámetro verbose=2 indica que se mostrará una línea por cada época durante el entrenamiento. Los valores posibles son: 0 para no mostrar información, 1 para visualizar una barra de progreso y 2 para ver un registro por época.
Además, las muestras se mezclan de manera aleatoria antes de cada época, gracias a la opción shuffle=True.
Una vez completado el entrenamiento, se realizan predicciones sobre el Precio del Bitcoin utilizando los mejores modelos guardados. Las predicciones generadas, que corresponden al Precio del Bitcoin escalado, son transformadas inversamente para recuperar los valores originales. Asimismo, se evalúa la calidad del ajuste mediante el uso de métricas de medición como SSE, MAPE, MAD, MSD y R2.
import os
import re
from tensorflow.keras.models import load_model
import numpy as np

# Directory where ModelCheckpoint stored the per-epoch models.
model_dir = 'keras_models'
files = os.listdir(model_dir)
# Checkpoint names look like:
#   PRSA_data_vola21_LSTM7_weights.<epoch>-<val_loss>.keras
# Group 1 = epoch (not needed here), group 2 = validation loss.
# Compiled once instead of re-compiling via re.match on every iteration.
pattern = re.compile(r"PRSA_data_vola21_LSTM7_weights\.(\d+)-([\d\.]+)\.keras")
best_val_loss = float('inf')
best_model_file = None
best7_lstm_vola21 = None
# Scan every checkpoint file and keep the one with the lowest val_loss.
for file in files:
    match = pattern.match(file)
    if match:
        val_loss = float(match.group(2))
        # Strictly-lower comparison: on ties the earliest file scanned wins.
        if val_loss < best_val_loss:
            best_val_loss = val_loss
            best_model_file = file
# Load the winning checkpoint, if any filename matched the pattern.
if best_model_file:
    best_model_path = os.path.join(model_dir, best_model_file)
    print(f"Cargando el mejor modelo: {best_model_file} con val_loss: {best_val_loss}")
    best7_lstm_vola21 = load_model(best_model_path)
    if best7_lstm_vola21 is None:
        print("Error: No se pudo cargar el mejor modelo.")
else:
    print("No se encontraron archivos de modelos que coincidan con el patrón.")
Cargando el mejor modelo: PRSA_data_vola21_LSTM7_weights.02-0.0010.keras con val_loss: 0.001
A continuación estimamos las predicciones del modelo LSTM para la época en donde la función de pérdida alcanza su valor más bajo.
# Generate predictions with the best checkpointed model; bail out early if
# no model could be loaded in the previous step.
if best7_lstm_vola21 is None:
    print("No se puede realizar predicciones porque el mejor modelo no se ha cargado.")
else:
    # Forward pass over each split; inputs are the 3-D (samples, 7, 1) arrays.
    train_preds_vola21_LSTM7 = best7_lstm_vola21.predict(X_train_vola21_lstm_7)
    val_preds_vola21_LSTM7 = best7_lstm_vola21.predict(X_val_vola21_lstm_7)
    test_preds_vola21_LSTM7 = best7_lstm_vola21.predict(X_test_vola21_lstm_7)
    # Collapse the trailing singleton output dimension: (n, 1) -> (n,).
    train_preds_vola21_LSTM7 = np.squeeze(train_preds_vola21_LSTM7)
    val_preds_vola21_LSTM7 = np.squeeze(val_preds_vola21_LSTM7)
    test_preds_vola21_LSTM7 = np.squeeze(test_preds_vola21_LSTM7)
    # Show the raw (still scaled) predictions as a quick sanity check.
    print("Predicciones de Entrenamiento:", train_preds_vola21_LSTM7)
    print("Predicciones de validación:", val_preds_vola21_LSTM7)
    print("Predicciones de prueba:", test_preds_vola21_LSTM7)
156/156 [==============================] - 1s 921us/step
1/1 [==============================] - 0s 10ms/step
1/1 [==============================] - 0s 9ms/step
Predicciones de Entrenamiento: [0.00585764 0.00585764 0.00585764 ... 0.02948757 0.02868776 0.02892264]
Predicciones de validación: [0.02886204 0.03208293 0.0360367 0.03546178 0.03526539 0.03516462
0.03485497]
Predicciones de prueba: [0.03486354 0.03516125 0.0355188 0.0351285 0.03569671 0.03636714
0.03960679]
# Plot the last 100 training points plus the validation/test series against
# the LSTM predictions for the 7-day horizon.
plot_model(data_train_plot_vola21[-100:], data_val_plot_vola21, data_test_plot_vola21, val_preds_vola21_LSTM7, test_preds_vola21_LSTM7, "Predicciones usando Memoria a Corto y Largo Paso (LSTM) para un horizonte de 7 días")
A continuación realizamos un análisis sobre los residuales y rendimiento de nuestro modelo con relación a nuestro conjunto de Entrenamiento(rendimiento) y test (rendimiento y residuales).
Conjunto de datos de Entrenamiento
A continuación se realiza el análisis de los residuales para el conjunto de entrenamiento.
# Residual diagnostics on the training split: Ljung-Box (autocorrelation)
# and Jarque-Bera (normality) p-values, per the printed output below.
ljung_box_pval_LSTM_train7_vola21, jarque_bera_pval_LSTM_train7_vola21 = diagnostic_plots(y_train_vola21_7, train_preds_vola21_LSTM7)
Ljung-Box LB Statistic: 710.344928
Ljung-Box p-value: 0.000000
Se rechaza H0: hay autocorrelación en los residuales.
Jarque-Bera p-value: 0.000000
Se rechaza H0: los residuales no siguen una distribución normal.
# Compute fit metrics (SSE, MAPE, MAD, MSD, R2 per the table below) on the
# training set; metricas() is defined elsewhere in the notebook.
metrica_vola21_LSTM_train = metricas(y_train_vola21_7,train_preds_vola21_LSTM7)
# Relabel row 0 with a descriptive name for the summary table.
metrica_vola21_LSTM_train.index = metrica_vola21_LSTM_train.index.map({0: 'LSTM Entrenamiento Volatilidad ω = 21 y τ = 7'})
# Append the residual-diagnostic p-values computed above as extra columns.
metrica_vola21_LSTM_train['Ljung-Box p-value'] = pd.Series([ljung_box_pval_LSTM_train7_vola21], index=metrica_vola21_LSTM_train.index)
metrica_vola21_LSTM_train['Jarque-Bera p-value'] = pd.Series([jarque_bera_pval_LSTM_train7_vola21], index=metrica_vola21_LSTM_train.index)
metrica_vola21_LSTM_train
| SSE | MAPE | MAD | MSD | R2 | Ljung-Box p-value | Jarque-Bera p-value | |
|---|---|---|---|---|---|---|---|
| LSTM Entrenamiento Volatilidad ω = 21 y τ = 7 | 1.2912 | 11.0% | 0.01 | 0.0 | 93.14% | 1.6833e-156 | 0.0 |
Conjunto de datos de Prueba:
# Residual diagnostics on the test set; returns the Ljung-Box and
# Jarque-Bera p-values (evaluate_residuals defined elsewhere in the notebook).
ljung_box_pvalLSTM7_vola21, jarque_bera_pvalLSTM7_vola21 = evaluate_residuals(data_test_plot_vola21_7, test_preds_vola21_LSTM7)
No se rechaza H0: los residuales son independientes (no correlacionados).
No se rechaza H0: los residuales siguen una distribución normal.
# Compute the same fit metrics on the test set.
metrica_LSTM_test_vola21 = metricas(y_test_vola21_7,test_preds_vola21_LSTM7)
# Relabel row 0 with a descriptive name for the summary table.
metrica_LSTM_test_vola21.index = metrica_LSTM_test_vola21.index.map({0: 'LSTM Prueba Volatilidad ω = 21 y τ = 7'})
# Append the test-set residual-diagnostic p-values as extra columns.
metrica_LSTM_test_vola21['Ljung-Box p-value'] = pd.Series([ljung_box_pvalLSTM7_vola21], index=metrica_LSTM_test_vola21.index)
metrica_LSTM_test_vola21['Jarque-Bera p-value'] = pd.Series([jarque_bera_pvalLSTM7_vola21], index=metrica_LSTM_test_vola21.index)
metrica_LSTM_test_vola21
| SSE | MAPE | MAD | MSD | R2 | Ljung-Box p-value | Jarque-Bera p-value | |
|---|---|---|---|---|---|---|---|
| LSTM Prueba Volatilidad ω = 21 y τ = 7 | 2.0359e-05 | 3.2% | 0.0 | 0.0 | 38.79% | 0.9782 | 0.5669 |
Curva Runs vs Error/Score :
# Plot the run-vs-error/score curve from the stored training histories.
plot_best_model_validation_loss(history_vola21_LSTM7)
Boxplot Errores Conjunto de Entrenamiento, Validación y Prueba :
# Boxplots of prediction errors for the train / validation / test splits.
errores_plots(y_train_vola21_7, train_preds_vola21_LSTM7, y_val_vola21_7, val_preds_vola21_LSTM7, y_test_vola21_7, test_preds_vola21_LSTM7)
De acuerdo con los resultados obtenidos del gráfico de Run vs Error/Score y al compararlo con el gráfico boxplot de los residuales se observa lo siguiente:
Conjunto de Entrenamiento: La media de los residuales se encuentra alejada del error/score correspondiente al val_loss de la época del mejor modelo.
Conjunto de Validación: La media de los residuales se encuentra cercana al error/score correspondiente al val_loss de la época del mejor modelo.
Conjunto de Prueba: La media de los residuales se encuentra alejada del error/score correspondiente al val_loss de la época del mejor modelo.
Lo anterior es coherente con las métricas obtenidas para el modelo implementado.
Horizonte de 14 días (\(\tau=14\))#
Indexación de parámetros para cambio de dimensión de X de 2D a 3D.
# Reshape the 2-D feature matrices into the 3-D (samples, timesteps, features)
# layout required by Keras LSTM layers (shapes printed below).
X_train_vola21_lstm_14, X_val_vola21_lstm_14, X_test_vola21_lstm_14 = change_dimension_lstm(X_train_vola21_14, X_val_vola21_14, X_test_vola21_14)
Shape of 3D arrays X: (4943, 14, 1) (14, 14, 1) (14, 14, 1)
A continuación, se lleva a cabo la búsqueda de los hiperparámetros que optimizan nuestro modelo utilizando Keras y GridSearch.
from sklearn.metrics import make_scorer, mean_absolute_error
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # Esto ignorará todos los logs de info y warnings.
# Define the LSTM network architecture.
def create_lstm_model(optimizer, activation):
    """Build and compile a two-layer LSTM regressor for 14-step windows.

    Parameters
    ----------
    optimizer : str or keras optimizer
        Optimizer passed to ``compile`` (e.g. 'SGD', 'Adam').
    activation : str
        Activation for the LSTM layers. BUG FIX: the original version
        accepted this argument but never used it, so the GridSearch over
        'activation' had no effect; it is now applied to both LSTM layers.

    Returns
    -------
    A compiled keras Model with mean-absolute-error loss.
    """
    # One feature per time step over a 14-step lookback window.
    input_layer_lstm = Input(shape=(14, 1), dtype='float32')
    # First LSTM returns the whole sequence so it can feed the second LSTM.
    lstm_layer1 = LSTM(64, activation=activation, return_sequences=True)(input_layer_lstm)
    # Second LSTM emits only the last time step's output.
    lstm_layer2 = LSTM(32, activation=activation, return_sequences=False)(lstm_layer1)
    # Drop 20% of the units to regularize before the output layer.
    dropout_layer_lstm = Dropout(0.2)(lstm_layer2)
    # Single linear neuron for one-step regression output.
    output_layer_lstm = Dense(1, activation='linear')(dropout_layer_lstm)
    ts_model_lstm = Model(inputs=input_layer_lstm, outputs=output_layer_lstm)
    ts_model_lstm.compile(loss='mean_absolute_error', optimizer=optimizer)
    return ts_model_lstm
# Hyperparameter grid (the commented lists show the full intended search space).
param_grid = {
    'activation': ['relu'],  # candidates: ['relu', 'tanh', 'sigmoid']
    'epochs': [20],          # candidates: [20, 50, 100, 150]
    'optimizer': ['SGD'],    # candidates: ['SGD', 'RMSprop', 'Adam']
}
# Grid-search configuration.
# BUG FIX: make_scorer defaults to greater_is_better=True, which made
# GridSearchCV *maximize* the MAE and select the worst candidate. The error
# must be negated so that a smaller MAE ranks higher.
scoring = make_scorer(mean_absolute_error, greater_is_better=False)
model = KerasRegressor(build_fn=create_lstm_model, epochs=10, batch_size=16, verbose=0)
grid = GridSearchCV(estimator=model, param_grid=param_grid,
                    cv=KFold(n_splits=5, shuffle=True),
                    scoring=scoring, n_jobs=-1, verbose=2)
grid_result = grid.fit(X_train_vola21_lstm_14, y_train_vola21_14)
# Report the best hyperparameters found.
print(f"Mejor función de activación: {grid_result.best_params_['activation']}")
print(f"Mejor número de epocas: {grid_result.best_params_['epochs']}")
print(f"Mejor Longitud de paso μ (optimizer): {grid_result.best_params_['optimizer']}")
print(f"Mejor Puntuación: {grid_result.best_score_}")
Fitting 5 folds for each of 1 candidates, totalling 5 fits
[CV] END ..........activation=relu, epochs=20, optimizer=SGD; total time= 28.1s
[CV] END ..........activation=relu, epochs=20, optimizer=SGD; total time= 28.6s
[CV] END ..........activation=relu, epochs=20, optimizer=SGD; total time= 28.6s
[CV] END ..........activation=relu, epochs=20, optimizer=SGD; total time= 28.6s
[CV] END ..........activation=relu, epochs=20, optimizer=SGD; total time= 28.7s
Mejor función de activación: relu
Mejor número de epocas: 20
Mejor Longitud de paso μ (optimizer): SGD
Mejor Puntuación: 0.018596852649264046
El modelo de predicción de series temporales se basa en una red neuronal recurrente que incluye una capa de entrada que se conecta a una capa LSTM. Esta capa LSTM considera 14 pasos temporales, equivalentes al número de datos históricos. La salida de la LSTM se genera únicamente a partir del último paso temporal. Cada uno de los n pasos temporales definidos en el shape (n,) cuenta con 32 neuronas ocultas.
La cantidad de pasos temporales (timesteps) y 1 indica el número de características (features) por cada paso temporal. Se utiliza return_sequences=True cuando es necesario enviar la secuencia completa de salidas a la capa siguiente, que puede ser otra capa recurrente, para este caso otra capa LSTM.
La salida de LSTM pasa a una capa de exclusión que elimina aleatoriamente el 20%, 40%, 60% y 80% de la entrada antes de pasar a la capa de salida, que tiene una única neurona oculta con una función de activación lineal
La indexación de parámetros de la función build_models_lstm con base en lo indicado en el numeral 3 del parcial práctico.
# Candidate architectures per item 3 of the practical exam: 14-step lookback
# window, crossed over the neuron counts and dropout rates below.
input_shape14 = 14
neurons_list = [10, 100, 1000, 10000]
dropout_rates = [0.2, 0.4, 0.6, 0.8]
# build_models_lstm is defined earlier in the notebook; optimizer fixed to SGD.
models_LSTM14_vola21 = build_models_lstm(input_shape14, neurons_list, dropout_rates, 'SGD')
Model: "model_630"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_631 (InputLayer) [(None, 14, 1)] 0
lstm_580 (LSTM) (None, 14, 64) 16896
lstm_581 (LSTM) (None, 32) 12416
dropout_630 (Dropout) (None, 32) 0
dense_1650 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10 neuronas y dropout 0.2:
Model: "model_630"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_631 (InputLayer) [(None, 14, 1)] 0
lstm_580 (LSTM) (None, 14, 64) 16896
lstm_581 (LSTM) (None, 32) 12416
dropout_630 (Dropout) (None, 32) 0
dense_1650 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_631"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_632 (InputLayer) [(None, 14, 1)] 0
lstm_582 (LSTM) (None, 14, 64) 16896
lstm_583 (LSTM) (None, 32) 12416
dropout_631 (Dropout) (None, 32) 0
dense_1651 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10 neuronas y dropout 0.4:
Model: "model_631"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_632 (InputLayer) [(None, 14, 1)] 0
lstm_582 (LSTM) (None, 14, 64) 16896
lstm_583 (LSTM) (None, 32) 12416
dropout_631 (Dropout) (None, 32) 0
dense_1651 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_632"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_633 (InputLayer) [(None, 14, 1)] 0
lstm_584 (LSTM) (None, 14, 64) 16896
lstm_585 (LSTM) (None, 32) 12416
dropout_632 (Dropout) (None, 32) 0
dense_1652 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10 neuronas y dropout 0.6:
Model: "model_632"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_633 (InputLayer) [(None, 14, 1)] 0
lstm_584 (LSTM) (None, 14, 64) 16896
lstm_585 (LSTM) (None, 32) 12416
dropout_632 (Dropout) (None, 32) 0
dense_1652 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_633"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_634 (InputLayer) [(None, 14, 1)] 0
lstm_586 (LSTM) (None, 14, 64) 16896
lstm_587 (LSTM) (None, 32) 12416
dropout_633 (Dropout) (None, 32) 0
dense_1653 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10 neuronas y dropout 0.8:
Model: "model_633"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_634 (InputLayer) [(None, 14, 1)] 0
lstm_586 (LSTM) (None, 14, 64) 16896
lstm_587 (LSTM) (None, 32) 12416
dropout_633 (Dropout) (None, 32) 0
dense_1653 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_634"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_635 (InputLayer) [(None, 14, 1)] 0
lstm_588 (LSTM) (None, 14, 64) 16896
lstm_589 (LSTM) (None, 32) 12416
dropout_634 (Dropout) (None, 32) 0
dense_1654 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 100 neuronas y dropout 0.2:
Model: "model_634"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_635 (InputLayer) [(None, 14, 1)] 0
lstm_588 (LSTM) (None, 14, 64) 16896
lstm_589 (LSTM) (None, 32) 12416
dropout_634 (Dropout) (None, 32) 0
dense_1654 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_635"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_636 (InputLayer) [(None, 14, 1)] 0
lstm_590 (LSTM) (None, 14, 64) 16896
lstm_591 (LSTM) (None, 32) 12416
dropout_635 (Dropout) (None, 32) 0
dense_1655 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 100 neuronas y dropout 0.4:
Model: "model_635"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_636 (InputLayer) [(None, 14, 1)] 0
lstm_590 (LSTM) (None, 14, 64) 16896
lstm_591 (LSTM) (None, 32) 12416
dropout_635 (Dropout) (None, 32) 0
dense_1655 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_636"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_637 (InputLayer) [(None, 14, 1)] 0
lstm_592 (LSTM) (None, 14, 64) 16896
lstm_593 (LSTM) (None, 32) 12416
dropout_636 (Dropout) (None, 32) 0
dense_1656 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 100 neuronas y dropout 0.6:
Model: "model_636"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_637 (InputLayer) [(None, 14, 1)] 0
lstm_592 (LSTM) (None, 14, 64) 16896
lstm_593 (LSTM) (None, 32) 12416
dropout_636 (Dropout) (None, 32) 0
dense_1656 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_637"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_638 (InputLayer) [(None, 14, 1)] 0
lstm_594 (LSTM) (None, 14, 64) 16896
lstm_595 (LSTM) (None, 32) 12416
dropout_637 (Dropout) (None, 32) 0
dense_1657 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 100 neuronas y dropout 0.8:
Model: "model_637"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_638 (InputLayer) [(None, 14, 1)] 0
lstm_594 (LSTM) (None, 14, 64) 16896
lstm_595 (LSTM) (None, 32) 12416
dropout_637 (Dropout) (None, 32) 0
dense_1657 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_638"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_639 (InputLayer) [(None, 14, 1)] 0
lstm_596 (LSTM) (None, 14, 64) 16896
lstm_597 (LSTM) (None, 32) 12416
dropout_638 (Dropout) (None, 32) 0
dense_1658 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 1000 neuronas y dropout 0.2:
Model: "model_638"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_639 (InputLayer) [(None, 14, 1)] 0
lstm_596 (LSTM) (None, 14, 64) 16896
lstm_597 (LSTM) (None, 32) 12416
dropout_638 (Dropout) (None, 32) 0
dense_1658 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_639"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_640 (InputLayer) [(None, 14, 1)] 0
lstm_598 (LSTM) (None, 14, 64) 16896
lstm_599 (LSTM) (None, 32) 12416
dropout_639 (Dropout) (None, 32) 0
dense_1659 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 1000 neuronas y dropout 0.4:
Model: "model_639"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_640 (InputLayer) [(None, 14, 1)] 0
lstm_598 (LSTM) (None, 14, 64) 16896
lstm_599 (LSTM) (None, 32) 12416
dropout_639 (Dropout) (None, 32) 0
dense_1659 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_640"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_641 (InputLayer) [(None, 14, 1)] 0
lstm_600 (LSTM) (None, 14, 64) 16896
lstm_601 (LSTM) (None, 32) 12416
dropout_640 (Dropout) (None, 32) 0
dense_1660 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 1000 neuronas y dropout 0.6:
Model: "model_640"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_641 (InputLayer) [(None, 14, 1)] 0
lstm_600 (LSTM) (None, 14, 64) 16896
lstm_601 (LSTM) (None, 32) 12416
dropout_640 (Dropout) (None, 32) 0
dense_1660 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_641"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_642 (InputLayer) [(None, 14, 1)] 0
lstm_602 (LSTM) (None, 14, 64) 16896
lstm_603 (LSTM) (None, 32) 12416
dropout_641 (Dropout) (None, 32) 0
dense_1661 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 1000 neuronas y dropout 0.8:
Model: "model_641"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_642 (InputLayer) [(None, 14, 1)] 0
lstm_602 (LSTM) (None, 14, 64) 16896
lstm_603 (LSTM) (None, 32) 12416
dropout_641 (Dropout) (None, 32) 0
dense_1661 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_642"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_643 (InputLayer) [(None, 14, 1)] 0
lstm_604 (LSTM) (None, 14, 64) 16896
lstm_605 (LSTM) (None, 32) 12416
dropout_642 (Dropout) (None, 32) 0
dense_1662 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10000 neuronas y dropout 0.2:
Model: "model_642"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_643 (InputLayer) [(None, 14, 1)] 0
lstm_604 (LSTM) (None, 14, 64) 16896
lstm_605 (LSTM) (None, 32) 12416
dropout_642 (Dropout) (None, 32) 0
dense_1662 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_643"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_644 (InputLayer) [(None, 14, 1)] 0
lstm_606 (LSTM) (None, 14, 64) 16896
lstm_607 (LSTM) (None, 32) 12416
dropout_643 (Dropout) (None, 32) 0
dense_1663 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10000 neuronas y dropout 0.4:
Model: "model_643"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_644 (InputLayer) [(None, 14, 1)] 0
lstm_606 (LSTM) (None, 14, 64) 16896
lstm_607 (LSTM) (None, 32) 12416
dropout_643 (Dropout) (None, 32) 0
dense_1663 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_644"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_645 (InputLayer) [(None, 14, 1)] 0
lstm_608 (LSTM) (None, 14, 64) 16896
lstm_609 (LSTM) (None, 32) 12416
dropout_644 (Dropout) (None, 32) 0
dense_1664 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10000 neuronas y dropout 0.6:
Model: "model_644"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_645 (InputLayer) [(None, 14, 1)] 0
lstm_608 (LSTM) (None, 14, 64) 16896
lstm_609 (LSTM) (None, 32) 12416
dropout_644 (Dropout) (None, 32) 0
dense_1664 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_645"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_646 (InputLayer) [(None, 14, 1)] 0
lstm_610 (LSTM) (None, 14, 64) 16896
lstm_611 (LSTM) (None, 32) 12416
dropout_645 (Dropout) (None, 32) 0
dense_1665 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10000 neuronas y dropout 0.8:
Model: "model_645"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_646 (InputLayer) [(None, 14, 1)] 0
lstm_610 (LSTM) (None, 14, 64) 16896
lstm_611 (LSTM) (None, 32) 12416
dropout_645 (Dropout) (None, 32) 0
dense_1665 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Para entrenar el modelo, se utiliza la función fit() en el objeto del modelo, proporcionándole como argumentos X_train y y_train. El proceso de entrenamiento se lleva a cabo a lo largo de un número específico de épocas. Además, el parámetro batch_size especifica cuántas muestras del conjunto de entrenamiento se usarán en cada iteración de retropropagación (backpropagation).
El conjunto de datos de validación se utiliza para evaluar el rendimiento del modelo al finalizar cada época. Se emplea un objeto ModelCheckpoint que monitorea la función de pérdida en el conjunto de validación y almacena el modelo correspondiente a la época en la que esta función alcanza su valor más bajo.
Se define val_loss como el valor de la función de coste para los datos de validación cruzada y loss como el valor de la función de coste para los datos de entrenamiento. period=1 indica que el modelo se evaluará y potencialmente se guardará después de cada época.
from tensorflow.keras.callbacks import ModelCheckpoint
# Checkpoint path template: epoch number and validation loss are embedded in
# the file name so the best-model search further below can parse them back.
save_weights = os.path.join('keras_models', 'PRSA_data_vola21_LSTM14_weights.{epoch:02d}-{val_loss:.4f}.keras')
# Save the full model (not just weights) once per epoch, but only when
# val_loss improves on the best value seen so far.
save_best14_lstm_vola21 = ModelCheckpoint(save_weights, monitor='val_loss', verbose=2,
save_best_only=True, save_weights_only=False, mode='min', save_freq='epoch');
A continuación se itera sobre cada uno de los modelos del objeto models_LSTM14_vola21.
import os
from joblib import dump, load

history_vola21_LSTM14 = []
# Train (or reload) every candidate model; each history is cached on disk
# with joblib so re-running the notebook does not retrain from scratch.
for i, model in enumerate(models_LSTM14_vola21):
    filename = f'history_vola21_LSTM14_model_{i}.joblib'
    if os.path.exists(filename):
        # Cached run: load the stored history dict instead of refitting.
        model_history = load(filename)
        # BUG FIX: the original message printed a literal placeholder
        # instead of interpolating the cache file name (see the logged
        # output below, which shows the interpolated names).
        print(f"El archivo '{filename}' ya existe. Se ha cargado el historial del entrenamiento.")
    else:
        model_history = model.fit(x=X_train_vola21_lstm_14, y=y_train_vola21_14, batch_size=16, epochs=20,
                                  verbose=2, callbacks=[save_best14_lstm_vola21],
                                  validation_data=(X_val_vola21_lstm_14, y_val_vola21_14),
                                  shuffle=True)
        # Persist only the history dict, not the full History object.
        dump(model_history.history, filename)
        # BUG FIX: same placeholder problem as above.
        print(f"El entrenamiento del modelo {i + 1} se ha completado y el historial ha sido guardado en '{filename}'.")
    # Loaded histories are plain dicts; freshly-fit ones carry .history.
    history_vola21_LSTM14.append(model_history if isinstance(model_history, dict) else model_history.history)
El archivo 'history_vola21_LSTM14_model_0.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola21_LSTM14_model_1.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola21_LSTM14_model_2.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola21_LSTM14_model_3.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola21_LSTM14_model_4.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola21_LSTM14_model_5.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola21_LSTM14_model_6.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola21_LSTM14_model_7.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola21_LSTM14_model_8.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola21_LSTM14_model_9.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola21_LSTM14_model_10.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola21_LSTM14_model_11.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola21_LSTM14_model_12.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola21_LSTM14_model_13.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola21_LSTM14_model_14.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola21_LSTM14_model_15.joblib' ya existe. Se ha cargado el historial del entrenamiento.
En este caso, el parámetro verbose=2 indica que se mostrará una línea por cada época durante el entrenamiento. Los valores posibles son: 0 para no mostrar información, 1 para visualizar una barra de progreso y 2 para ver un registro por época.
Además, las muestras se mezclan de manera aleatoria antes de cada época, gracias a la opción shuffle=True.
Una vez completado el entrenamiento, se realizan predicciones sobre el Precio del Bitcoin utilizando los mejores modelos guardados. Las predicciones generadas, que corresponden al Precio del Bitcoin escalado, son transformadas inversamente para recuperar los valores originales. Asimismo, se evalúa la calidad del ajuste mediante el uso de métricas de medición como SSE, MAPE, MAD, MSD y R2.
import os
import re
from tensorflow.keras.models import load_model
import numpy as np

# Scan the checkpoint directory for the file with the lowest validation loss
# (the epoch and val_loss are encoded in each file name by ModelCheckpoint)
# and load that model.
model_dir = 'keras_models'
files = os.listdir(model_dir)
# File names look like: PRSA_data_vola21_LSTM14_weights.<epoch>-<val_loss>.keras
pattern = r"PRSA_data_vola21_LSTM14_weights\.(\d+)-([\d\.]+)\.keras"
best_val_loss = float('inf')
best_model_file = None
best14_lstm_vola21 = None
for file in files:
    match = re.match(pattern, file)
    if match:
        # group(2) is the val_loss encoded in the file name. (The epoch in
        # group(1) is not needed for the selection, so it is not extracted —
        # the original bound it to an unused variable.)
        val_loss = float(match.group(2))
        if val_loss < best_val_loss:
            best_val_loss = val_loss
            best_model_file = file  # remember the best checkpoint so far
# Load the winning checkpoint, if any file matched the pattern.
if best_model_file:
    best_model_path = os.path.join(model_dir, best_model_file)
    print(f"Cargando el mejor modelo: {best_model_file} con val_loss: {best_val_loss}")
    best14_lstm_vola21 = load_model(best_model_path)
    if best14_lstm_vola21 is None:
        print("Error: No se pudo cargar el mejor modelo.")
else:
    print("No se encontraron archivos de modelos que coincidan con el patrón.")
Cargando el mejor modelo: PRSA_data_vola21_LSTM14_weights.12-0.0007.keras con val_loss: 0.0007
A continuación estimamos las predicciones del modelo LSTM para la época en donde la función de pérdida alcanza su valor más bajo.
# Predicciones usando el mejor modelo
if best14_lstm_vola21 is None:
    print("No se puede realizar predicciones porque el mejor modelo no se ha cargado.")
else:
    # Predict on each split and drop the trailing singleton dimension in one step
    train_preds_vola21_LSTM14 = np.squeeze(best14_lstm_vola21.predict(X_train_vola21_lstm_14))
    val_preds_vola21_LSTM14 = np.squeeze(best14_lstm_vola21.predict(X_val_vola21_lstm_14))
    test_preds_vola21_LSTM14 = np.squeeze(best14_lstm_vola21.predict(X_test_vola21_lstm_14))
    # Show the (scaled) predictions for each split
    print("Predicciones de Entrenamiento:", train_preds_vola21_LSTM14)
    print("Predicciones de validación:", val_preds_vola21_LSTM14)
    print("Predicciones de prueba:", test_preds_vola21_LSTM14)
155/155 [==============================] - 1s 3ms/step
1/1 [==============================] - 0s 13ms/step
1/1 [==============================] - 0s 10ms/step
Predicciones de Entrenamiento: [0.00137068 0.00137068 0.00137068 ... 0.01859684 0.01999646 0.02006643]
Predicciones de validación: [0.02001688 0.01669182 0.01688053 0.01792961 0.01778759 0.01625076
0.01671558 0.01651578 0.01629533 0.01587795 0.01582943 0.01634271
0.01683865 0.0167429 ]
Predicciones de prueba: [0.0163111 0.01857354 0.01997992 0.02594029 0.02722446 0.02680098
0.02706948 0.02702812 0.03009801 0.03419576 0.03414332 0.03409775
0.03402049 0.03370049]
# Plot the last 100 training points together with validation/test data and their LSTM predictions
plot_model(data_train_plot_vola21_14[-100:], data_val_plot_vola21_14, data_test_plot_vola21_14, val_preds_vola21_LSTM14, test_preds_vola21_LSTM14, "Predicciones usando Memoria a Corto y Largo Paso (LSTM)")
A continuación realizamos un análisis sobre los residuales y rendimiento de nuestro modelo con relación a nuestro conjunto de Entrenamiento (rendimiento) y test (rendimiento y residuales).
Conjunto de datos de Entrenamiento
A continuación se realiza el análisis de los residuales para el conjunto de entrenamiento.
# Residual diagnostics on the training set (Ljung-Box and Jarque-Bera p-values).
# FIX: the original passed y_train_vola14_21 / train_preds_vola14_LSTM21, names
# that are never defined (suffixes transposed) and would raise NameError; the
# variables defined earlier are y_train_vola21_14 / train_preds_vola21_LSTM14.
ljung_box_pval_LSTM_train14_vola21, jarque_bera_pval_LSTM_train14_vola21 = diagnostic_plots(y_train_vola21_14, train_preds_vola21_LSTM14)
Ljung-Box LB Statistic: 24.029641
Ljung-Box p-value: 0.000001
Se rechaza H0: hay autocorrelación en los residuales.
Jarque-Bera p-value: 0.000000
Se rechaza H0: los residuales no siguen una distribución normal.
# Compute fit metrics for the training split and attach the residual-test p-values
_train14_label = 'LSTM Entrenamiento Volatilidada ω = 21 y τ = 14'
metrica_vola21_LSTM_train14 = metricas(y_train_vola21_14, train_preds_vola21_LSTM14)
metrica_vola21_LSTM_train14.index = metrica_vola21_LSTM_train14.index.map({0: _train14_label})
# Add one column per residual diagnostic, aligned on the (single-row) index
for _col, _pval in (('Ljung-Box p-value', ljung_box_pval_LSTM_train14_vola21),
                    ('Jarque-Bera p-value', jarque_bera_pval_LSTM_train14_vola21)):
    metrica_vola21_LSTM_train14[_col] = pd.Series([_pval], index=metrica_vola21_LSTM_train14.index)
metrica_vola21_LSTM_train14
| SSE | MAPE | MAD | MSD | R2 | Ljung-Box p-value | Jarque-Bera p-value | |
|---|---|---|---|---|---|---|---|
| LSTM Entrenamiento Volatilidada ω = 21 y τ = 14 | 0.9414 | 6.88% | 0.0 | 0.0 | 94.99% | 9.4864e-07 | 0.0 |
Conjunto de datos de Prueba:
# Residual diagnostics on the test set: Ljung-Box (autocorrelation) and Jarque-Bera (normality) p-values
ljung_box_pvalLSTM14_vola21, jarque_bera_pvalLSTM14_vola21 = evaluate_residuals(data_test_plot_vola21_14, test_preds_vola21_LSTM14)
Se rechaza H0: hay autocorrelación en los residuales.
No se rechaza H0: los residuales siguen una distribución normal.
# Compute fit metrics for the test split and attach the residual-test p-values
_test14_label = 'LSTM Prueba Volatilidada ω = 21 y τ = 14'
metrica_LSTM_test14_vola21 = metricas(y_test_vola21_14, test_preds_vola21_LSTM14)
metrica_LSTM_test14_vola21.index = metrica_LSTM_test14_vola21.index.map({0: _test14_label})
# Add one column per residual diagnostic, aligned on the (single-row) index
for _col, _pval in (('Ljung-Box p-value', ljung_box_pvalLSTM14_vola21),
                    ('Jarque-Bera p-value', jarque_bera_pvalLSTM14_vola21)):
    metrica_LSTM_test14_vola21[_col] = pd.Series([_pval], index=metrica_LSTM_test14_vola21.index)
metrica_LSTM_test14_vola21
| SSE | MAPE | MAD | MSD | R2 | Ljung-Box p-value | Jarque-Bera p-value | |
|---|---|---|---|---|---|---|---|
| LSTM Prueba Volatilidada ω = 21 y τ = 14 | 0.0001 | 7.81% | 0.0 | 0.0 | 71.83% | 0.0401 | 0.507 |
Curva Runs vs Error/Score :
# Plot the epoch-vs-validation-loss curve for the best of the trained models
plot_best_model_validation_loss(history_vola21_LSTM14)
Boxplot Errores Conjunto de Entrenamiento, Validación y Prueba :
# Boxplots of the prediction errors for the train, validation and test splits
errores_plots(y_train_vola21_14, train_preds_vola21_LSTM14, y_val_vola21_14, val_preds_vola21_LSTM14, y_test_vola21_14, test_preds_vola21_LSTM14)
De acuerdo a los resultados obtenidos del gráfico de Run vs Error/Score y al compararlo con el gráfico boxplot de los residuales se observa lo siguiente:
Conjunto de Entrenamiento: La media de los residuales se encuentra alejada del error/score correspondiente al val_loss de la época del mejor modelo, además muestra un sesgo considerable a la derecha y una alta variabilidad.
Conjunto de Validación: La media de los residuales se encuentra alejada del error/score correspondiente al val_loss de la época del mejor modelo.
Conjunto de Prueba: La media de los residuales se encuentra alejada del error/score correspondiente al val_loss de la época del mejor modelo.
Lo anterior es coherente con las métricas obtenidas para el modelo implementado.
Horizonte de 21 días (\(\tau=21\))#
Indexación de parámetros para cambio de dimensión de X de 2D a 3D.
# Reshape the 2D feature matrices into the 3D (samples, timesteps, features) layout required by LSTM layers
X_train_vola21_lstm_21, X_val_vola21_lstm_21, X_test_vola21_lstm_21 = change_dimension_lstm(X_train_vola21_21, X_val_vola21_21, X_test_vola21_21)
Shape of 3D arrays X: (4915, 21, 1) (21, 21, 1) (21, 21, 1)
A continuación, se lleva a cabo la búsqueda de los hiperparámetros que optimizan nuestro modelo utilizando Keras y GridSearch.
from sklearn.metrics import make_scorer, mean_absolute_error

os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'  # suppress TensorFlow info/warning logs


def create_lstm_model(optimizer, activation):
    """Build and compile the two-layer LSTM regressor used by the grid search.

    Args:
        optimizer: Keras optimizer name or instance (e.g. 'SGD').
        activation: activation function for both LSTM layers.
    Returns:
        A compiled Keras Model with a single linear output neuron.
    """
    # FIX: the original accepted `activation` but never used it, so the grid
    # search over activation functions was a no-op. It is now applied to both
    # LSTM layers; the output layer stays linear, as usual for regression.
    input_layer_lstm = Input(shape=(21, 1), dtype='float32')
    lstm_layer1 = LSTM(64, activation=activation, return_sequences=True)(input_layer_lstm)
    lstm_layer2 = LSTM(32, activation=activation, return_sequences=False)(lstm_layer1)
    dropout_layer_lstm = Dropout(0.2)(lstm_layer2)
    output_layer_lstm = Dense(1, activation='linear')(dropout_layer_lstm)
    ts_model_lstm = Model(inputs=input_layer_lstm, outputs=output_layer_lstm)
    ts_model_lstm.compile(loss='mean_absolute_error', optimizer=optimizer)
    return ts_model_lstm


# Hyperparameter grid (the commented alternatives show the full search space)
param_grid = {
    'activation': ['relu'],  # full grid: ['relu', 'tanh', 'sigmoid']
    'epochs': [20],          # full grid: [20, 50, 100, 150]
    'optimizer': ['SGD'],    # full grid: ['SGD', 'RMSprop', 'Adam']
}

# Grid-search configuration.
# FIX: MAE is an error (lower is better); without greater_is_better=False,
# GridSearchCV would MAXIMIZE it and select the worst candidate whenever the
# grid has more than one point. Scores are reported negated by sklearn.
scoring = make_scorer(mean_absolute_error, greater_is_better=False)
model = KerasRegressor(build_fn=create_lstm_model, epochs=10, batch_size=16, verbose=0)
grid = GridSearchCV(estimator=model, param_grid=param_grid, cv=KFold(n_splits=5, shuffle=True),
                    scoring=scoring, n_jobs=-1, verbose=2)
grid_result = grid.fit(X_train_vola21_lstm_21, y_train_vola21_21)

# Report the winning hyperparameters
print(f"Mejor función de activación: {grid_result.best_params_['activation']}")
print(f"Mejor número de epocas: {grid_result.best_params_['epochs']}")
print(f"Mejor Longitud de paso μ (optimizer): {grid_result.best_params_['optimizer']}")
print(f"Mejor Puntuación: {grid_result.best_score_}")
Fitting 5 folds for each of 1 candidates, totalling 5 fits
[CV] END ..........activation=relu, epochs=20, optimizer=SGD; total time= 42.9s
[CV] END ..........activation=relu, epochs=20, optimizer=SGD; total time= 43.0s
[CV] END ..........activation=relu, epochs=20, optimizer=SGD; total time= 43.4s
[CV] END ..........activation=relu, epochs=20, optimizer=SGD; total time= 43.7s
[CV] END ..........activation=relu, epochs=20, optimizer=SGD; total time= 43.8s
Mejor función de activación: relu
Mejor número de epocas: 20
Mejor Longitud de paso μ (optimizer): SGD
Mejor Puntuación: 0.01861223444617785
El modelo de predicción de series temporales se basa en una red neuronal recurrente que incluye una capa de entrada que se conecta a una capa LSTM. Esta capa LSTM considera 21 pasos temporales, equivalentes al número de datos históricos. La salida de la LSTM se genera únicamente a partir del último paso temporal. Cada uno de los n pasos temporales definidos en el shape (n,) cuenta con 32 neuronas ocultas.
La cantidad de pasos temporales (timesteps) y 1 indica el número de características (features) por cada paso temporal. Se utiliza return_sequences=True cuando es necesario enviar la secuencia completa de salidas a la capa siguiente, que puede ser otra capa recurrente, para este caso otra capa LSTM.
La salida de LSTM pasa a una capa de exclusión que elimina aleatoriamente el 20%, 40%, 60% y 80% de la entrada antes de pasar a la capa de salida, que tiene una única neurona oculta con una función de activación lineal
La indexación de parámetros de la función build_models_lstm se realiza con base en lo indicado en el numeral 3 del parcial práctico.
# Hyperparameter sweep inputs for build_models_lstm
input_shape21 = 21  # number of timesteps per sample
neurons_list = [10, 100, 1000, 10000]   # candidate hidden-layer sizes
dropout_rates = [0.2, 0.4, 0.6, 0.8]    # candidate dropout rates
# NOTE(review): the summaries printed below are all identical (29,345 params),
# which suggests build_models_lstm may be ignoring neurons_list/dropout_rates —
# verify its implementation.
models_LSTM21_vola21 = build_models_lstm(input_shape21, neurons_list, dropout_rates, 'SGD')
Model: "model_647"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_648 (InputLayer) [(None, 21, 1)] 0
lstm_614 (LSTM) (None, 21, 64) 16896
lstm_615 (LSTM) (None, 32) 12416
dropout_647 (Dropout) (None, 32) 0
dense_1667 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10 neuronas y dropout 0.2:
Model: "model_647"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_648 (InputLayer) [(None, 21, 1)] 0
lstm_614 (LSTM) (None, 21, 64) 16896
lstm_615 (LSTM) (None, 32) 12416
dropout_647 (Dropout) (None, 32) 0
dense_1667 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_648"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_649 (InputLayer) [(None, 21, 1)] 0
lstm_616 (LSTM) (None, 21, 64) 16896
lstm_617 (LSTM) (None, 32) 12416
dropout_648 (Dropout) (None, 32) 0
dense_1668 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10 neuronas y dropout 0.4:
Model: "model_648"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_649 (InputLayer) [(None, 21, 1)] 0
lstm_616 (LSTM) (None, 21, 64) 16896
lstm_617 (LSTM) (None, 32) 12416
dropout_648 (Dropout) (None, 32) 0
dense_1668 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_649"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_650 (InputLayer) [(None, 21, 1)] 0
lstm_618 (LSTM) (None, 21, 64) 16896
lstm_619 (LSTM) (None, 32) 12416
dropout_649 (Dropout) (None, 32) 0
dense_1669 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10 neuronas y dropout 0.6:
Model: "model_649"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_650 (InputLayer) [(None, 21, 1)] 0
lstm_618 (LSTM) (None, 21, 64) 16896
lstm_619 (LSTM) (None, 32) 12416
dropout_649 (Dropout) (None, 32) 0
dense_1669 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_650"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_651 (InputLayer) [(None, 21, 1)] 0
lstm_620 (LSTM) (None, 21, 64) 16896
lstm_621 (LSTM) (None, 32) 12416
dropout_650 (Dropout) (None, 32) 0
dense_1670 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10 neuronas y dropout 0.8:
Model: "model_650"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_651 (InputLayer) [(None, 21, 1)] 0
lstm_620 (LSTM) (None, 21, 64) 16896
lstm_621 (LSTM) (None, 32) 12416
dropout_650 (Dropout) (None, 32) 0
dense_1670 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_651"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_652 (InputLayer) [(None, 21, 1)] 0
lstm_622 (LSTM) (None, 21, 64) 16896
lstm_623 (LSTM) (None, 32) 12416
dropout_651 (Dropout) (None, 32) 0
dense_1671 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 100 neuronas y dropout 0.2:
Model: "model_651"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_652 (InputLayer) [(None, 21, 1)] 0
lstm_622 (LSTM) (None, 21, 64) 16896
lstm_623 (LSTM) (None, 32) 12416
dropout_651 (Dropout) (None, 32) 0
dense_1671 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_652"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_653 (InputLayer) [(None, 21, 1)] 0
lstm_624 (LSTM) (None, 21, 64) 16896
lstm_625 (LSTM) (None, 32) 12416
dropout_652 (Dropout) (None, 32) 0
dense_1672 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 100 neuronas y dropout 0.4:
Model: "model_652"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_653 (InputLayer) [(None, 21, 1)] 0
lstm_624 (LSTM) (None, 21, 64) 16896
lstm_625 (LSTM) (None, 32) 12416
dropout_652 (Dropout) (None, 32) 0
dense_1672 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_653"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_654 (InputLayer) [(None, 21, 1)] 0
lstm_626 (LSTM) (None, 21, 64) 16896
lstm_627 (LSTM) (None, 32) 12416
dropout_653 (Dropout) (None, 32) 0
dense_1673 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 100 neuronas y dropout 0.6:
Model: "model_653"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_654 (InputLayer) [(None, 21, 1)] 0
lstm_626 (LSTM) (None, 21, 64) 16896
lstm_627 (LSTM) (None, 32) 12416
dropout_653 (Dropout) (None, 32) 0
dense_1673 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_654"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_655 (InputLayer) [(None, 21, 1)] 0
lstm_628 (LSTM) (None, 21, 64) 16896
lstm_629 (LSTM) (None, 32) 12416
dropout_654 (Dropout) (None, 32) 0
dense_1674 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 100 neuronas y dropout 0.8:
Model: "model_654"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_655 (InputLayer) [(None, 21, 1)] 0
lstm_628 (LSTM) (None, 21, 64) 16896
lstm_629 (LSTM) (None, 32) 12416
dropout_654 (Dropout) (None, 32) 0
dense_1674 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_655"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_656 (InputLayer) [(None, 21, 1)] 0
lstm_630 (LSTM) (None, 21, 64) 16896
lstm_631 (LSTM) (None, 32) 12416
dropout_655 (Dropout) (None, 32) 0
dense_1675 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 1000 neuronas y dropout 0.2:
Model: "model_655"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_656 (InputLayer) [(None, 21, 1)] 0
lstm_630 (LSTM) (None, 21, 64) 16896
lstm_631 (LSTM) (None, 32) 12416
dropout_655 (Dropout) (None, 32) 0
dense_1675 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_656"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_657 (InputLayer) [(None, 21, 1)] 0
lstm_632 (LSTM) (None, 21, 64) 16896
lstm_633 (LSTM) (None, 32) 12416
dropout_656 (Dropout) (None, 32) 0
dense_1676 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 1000 neuronas y dropout 0.4:
Model: "model_656"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_657 (InputLayer) [(None, 21, 1)] 0
lstm_632 (LSTM) (None, 21, 64) 16896
lstm_633 (LSTM) (None, 32) 12416
dropout_656 (Dropout) (None, 32) 0
dense_1676 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_657"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_658 (InputLayer) [(None, 21, 1)] 0
lstm_634 (LSTM) (None, 21, 64) 16896
lstm_635 (LSTM) (None, 32) 12416
dropout_657 (Dropout) (None, 32) 0
dense_1677 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 1000 neuronas y dropout 0.6:
Model: "model_657"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_658 (InputLayer) [(None, 21, 1)] 0
lstm_634 (LSTM) (None, 21, 64) 16896
lstm_635 (LSTM) (None, 32) 12416
dropout_657 (Dropout) (None, 32) 0
dense_1677 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_658"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_659 (InputLayer) [(None, 21, 1)] 0
lstm_636 (LSTM) (None, 21, 64) 16896
lstm_637 (LSTM) (None, 32) 12416
dropout_658 (Dropout) (None, 32) 0
dense_1678 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 1000 neuronas y dropout 0.8:
Model: "model_658"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_659 (InputLayer) [(None, 21, 1)] 0
lstm_636 (LSTM) (None, 21, 64) 16896
lstm_637 (LSTM) (None, 32) 12416
dropout_658 (Dropout) (None, 32) 0
dense_1678 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_659"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_660 (InputLayer) [(None, 21, 1)] 0
lstm_638 (LSTM) (None, 21, 64) 16896
lstm_639 (LSTM) (None, 32) 12416
dropout_659 (Dropout) (None, 32) 0
dense_1679 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10000 neuronas y dropout 0.2:
Model: "model_659"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_660 (InputLayer) [(None, 21, 1)] 0
lstm_638 (LSTM) (None, 21, 64) 16896
lstm_639 (LSTM) (None, 32) 12416
dropout_659 (Dropout) (None, 32) 0
dense_1679 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_660"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_661 (InputLayer) [(None, 21, 1)] 0
lstm_640 (LSTM) (None, 21, 64) 16896
lstm_641 (LSTM) (None, 32) 12416
dropout_660 (Dropout) (None, 32) 0
dense_1680 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10000 neuronas y dropout 0.4:
Model: "model_660"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_661 (InputLayer) [(None, 21, 1)] 0
lstm_640 (LSTM) (None, 21, 64) 16896
lstm_641 (LSTM) (None, 32) 12416
dropout_660 (Dropout) (None, 32) 0
dense_1680 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_661"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_662 (InputLayer) [(None, 21, 1)] 0
lstm_642 (LSTM) (None, 21, 64) 16896
lstm_643 (LSTM) (None, 32) 12416
dropout_661 (Dropout) (None, 32) 0
dense_1681 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10000 neuronas y dropout 0.6:
Model: "model_661"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_662 (InputLayer) [(None, 21, 1)] 0
lstm_642 (LSTM) (None, 21, 64) 16896
lstm_643 (LSTM) (None, 32) 12416
dropout_661 (Dropout) (None, 32) 0
dense_1681 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_662"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_663 (InputLayer) [(None, 21, 1)] 0
lstm_644 (LSTM) (None, 21, 64) 16896
lstm_645 (LSTM) (None, 32) 12416
dropout_662 (Dropout) (None, 32) 0
dense_1682 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10000 neuronas y dropout 0.8:
Model: "model_662"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_663 (InputLayer) [(None, 21, 1)] 0
lstm_644 (LSTM) (None, 21, 64) 16896
lstm_645 (LSTM) (None, 32) 12416
dropout_662 (Dropout) (None, 32) 0
dense_1682 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Para entrenar el modelo, se utiliza la función fit() en el objeto del modelo, proporcionándole como argumentos X_train y y_train. El proceso de entrenamiento se lleva a cabo a lo largo de un número específico de épocas. Además, el parámetro batch_size especifica cuántas muestras del conjunto de entrenamiento se usarán en cada iteración de retropropagación (backpropagation).
El conjunto de datos de validación se utiliza para evaluar el rendimiento del modelo al finalizar cada época. Se emplea un objeto ModelCheckpoint que monitorea la función de pérdida en el conjunto de validación y almacena el modelo correspondiente a la época en la que esta función alcanza su valor más bajo.
Se define val_loss como el valor de la función de coste para los datos de validación cruzada y loss como el valor de la función de coste para los datos de entrenamiento. period=1 indica que el modelo se evaluará y potencialmente se guardará después de cada época.
from tensorflow.keras.callbacks import ModelCheckpoint

# Checkpoint filenames encode the epoch number and the validation loss at save time
save_weights = os.path.join('keras_models', 'PRSA_data_vola21_LSTM21_weights.{epoch:02d}-{val_loss:.4f}.keras')
# Save the full model after any epoch that improves (lowers) the validation loss
save_best21_lstm_vola21 = ModelCheckpoint(
    save_weights,
    monitor='val_loss',
    verbose=2,
    save_best_only=True,
    save_weights_only=False,
    mode='min',
    save_freq='epoch',
)
A continuación se itera sobre cada uno de los modelos del objeto models_LSTM21.
import os
from joblib import dump, load

history_vola21_LSTM21 = []
# Train each candidate model, or reuse a previously saved training history.
for i, model in enumerate(models_LSTM21_vola21):
    filename = f'history_vola21_LSTM21_model_{i}.joblib'
    if os.path.exists(filename):
        # Cached run: load the stored history dict instead of retraining.
        model_history = load(filename)
        # FIX: the original printed the literal '(unknown)' instead of the
        # actual filename (lost f-string placeholder; see the captured output).
        print(f"El archivo '{filename}' ya existe. Se ha cargado el historial del entrenamiento.")
    else:
        model_history = model.fit(x=X_train_vola21_lstm_21, y=y_train_vola21_21, batch_size=16, epochs=20,
                                  verbose=2, callbacks=[save_best21_lstm_vola21],
                                  validation_data=(X_val_vola21_lstm_21, y_val_vola21_21),
                                  shuffle=True)
        # Persist only the history dict, not the whole History object.
        dump(model_history.history, filename)
        print(f"El entrenamiento del modelo {i + 1} se ha completado y el historial ha sido guardado en '{filename}'.")
    # Normalize to a plain dict (loaded histories already are dicts).
    history_vola21_LSTM21.append(model_history if isinstance(model_history, dict) else model_history.history)
El archivo 'history_vola21_LSTM21_model_0.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola21_LSTM21_model_1.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola21_LSTM21_model_2.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola21_LSTM21_model_3.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola21_LSTM21_model_4.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola21_LSTM21_model_5.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola21_LSTM21_model_6.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola21_LSTM21_model_7.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola21_LSTM21_model_8.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola21_LSTM21_model_9.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola21_LSTM21_model_10.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola21_LSTM21_model_11.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola21_LSTM21_model_12.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola21_LSTM21_model_13.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola21_LSTM21_model_14.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola21_LSTM21_model_15.joblib' ya existe. Se ha cargado el historial del entrenamiento.
En este caso, el parámetro verbose=2 indica que se mostrará una línea por cada época durante el entrenamiento. Los valores posibles son: 0 para no mostrar información, 1 para visualizar una barra de progreso y 2 para ver un registro por época.
Además, las muestras se mezclan de manera aleatoria antes de cada época, gracias a la opción shuffle=True.
Una vez completado el entrenamiento, se realizan predicciones sobre el Precio del Bitcoin utilizando los mejores modelos guardados. Las predicciones generadas, que corresponden al Precio del Bitcoin escalado, son transformadas inversamente para recuperar los valores originales. Asimismo, se evalúa la calidad del ajuste mediante el uso de métricas de medición como SSE, MAPE, MAD, MSD y R2.
import os
import re
from tensorflow.keras.models import load_model
import numpy as np

# Scan the checkpoint directory and load the model whose filename encodes the
# lowest validation loss (checkpoints are named "...weights.{epoch}-{val_loss}.keras").
model_dir = 'keras_models'
files = os.listdir(model_dir)
pattern = r"PRSA_data_vola21_LSTM21_weights\.(\d+)-([\d\.]+)\.keras"

best_val_loss = float('inf')
best_model_file = None
best21_lstm_vola21 = None

# Track the file with the minimum encoded val_loss.
for file in files:
    match = re.match(pattern, file)
    if match:
        # Group 1 is the epoch number (not needed to pick the best file);
        # group 2 is the validation loss encoded in the filename.
        val_loss = float(match.group(2))
        if val_loss < best_val_loss:
            best_val_loss = val_loss
            best_model_file = file

# Load the winning checkpoint, if any file matched the pattern.
if best_model_file:
    best_model_path = os.path.join(model_dir, best_model_file)
    print(f"Cargando el mejor modelo: {best_model_file} con val_loss: {best_val_loss}")
    best21_lstm_vola21 = load_model(best_model_path)
    if best21_lstm_vola21 is None:
        print("Error: No se pudo cargar el mejor modelo.")
else:
    print("No se encontraron archivos de modelos que coincidan con el patrón.")
Cargando el mejor modelo: PRSA_data_vola21_LSTM21_weights.16-0.0009.keras con val_loss: 0.0009
A continuación estimamos las predicciones del modelo LSTM para la época en donde la función de pérdida alcanza su valor más bajo.
# Predictions with the checkpointed best model for train/validation/test.
# Each predict() output has shape (n, 1); np.squeeze flattens it to 1-D.
if best21_lstm_vola21 is None:
    print("No se puede realizar predicciones porque el mejor modelo no se ha cargado.")
else:
    train_preds_vola21_LSTM21 = np.squeeze(best21_lstm_vola21.predict(X_train_vola21_lstm_21))
    val_preds_vola21_LSTM21 = np.squeeze(best21_lstm_vola21.predict(X_val_vola21_lstm_21))
    test_preds_vola21_LSTM21 = np.squeeze(best21_lstm_vola21.predict(X_test_vola21_lstm_21))

    # Show the flattened prediction vectors.
    print("Predicciones de Entrenamiento:", train_preds_vola21_LSTM21)
    print("Predicciones de validación:", val_preds_vola21_LSTM21)
    print("Predicciones de prueba:", test_preds_vola21_LSTM21)
154/154 [==============================] - 1s 2ms/step
1/1 [==============================] - 0s 13ms/step
1/1 [==============================] - 0s 10ms/step
Predicciones de Entrenamiento: [0.00065348 0.00065348 0.00065348 ... 0.03063774 0.03060195 0.03058668]
Predicciones de validación: [0.03057549 0.03045306 0.03019798 0.02876581 0.02770138 0.02972274
0.02989157 0.02991899 0.02651528 0.02618231 0.02599606 0.0262001
0.02062528 0.0203962 0.01971085 0.01934091 0.01921374 0.02009562
0.01885409 0.02031451 0.02043748]
Predicciones de prueba: [0.02036349 0.01683692 0.01697856 0.01814366 0.01800072 0.01635892
0.01683744 0.01665791 0.01641026 0.01597091 0.01591787 0.01646969
0.01700701 0.01690847 0.01644375 0.01884063 0.0203766 0.02669264
0.02813905 0.02761963 0.02787962]
# Plot the last 100 training points together with the validation/test series
# and the LSTM predictions for the 21-day horizon.
plot_model(data_train_plot_vola21[-100:], data_val_plot_vola21, data_test_plot_vola21, val_preds_vola21_LSTM21, test_preds_vola21_LSTM21, "Predicciones usando Memoria a Corto y Largo Paso (LSTM)")
A continuación realizamos un análisis sobre los residuales y rendimiento de nuestro modelo con relación a nuestro conjunto de Entrenamiento(rendimiento) y test (rendimiento y residuales).
Conjunto de datos de Entrenamiento
A continuación se realiza el análisis de los residuales para el conjunto de entrenamiento.
# Residual diagnostics on the training split: returns the Ljung-Box
# (independence) and Jarque-Bera (normality) p-values for the residuals.
ljung_box_pval_LSTM_train21_vola21, jarque_bera_pval_LSTM_train21_vola21 = diagnostic_plots(y_train_vola21_21, train_preds_vola21_LSTM21)
Ljung-Box LB Statistic: 0.656326
Ljung-Box p-value: 0.417860
No se rechaza H0: los residuales son independientes (no correlacionados).
Jarque-Bera p-value: 0.000000
Se rechaza H0: los residuales no siguen una distribución normal.
# Compute fit metrics (SSE, MAPE, MAD, MSD, R2) for the training split,
# relabel the single row, and append the residual-test p-values as columns.
metrica_vola21_LSTM_train21 = metricas(y_train_vola21_21,train_preds_vola21_LSTM21)
metrica_vola21_LSTM_train21.index = metrica_vola21_LSTM_train21.index.map({0: 'LSTM Entrenamiento Volatilidad ω = 21 y τ = 21'})
metrica_vola21_LSTM_train21['Ljung-Box p-value'] = pd.Series([ljung_box_pval_LSTM_train21_vola21], index=metrica_vola21_LSTM_train21.index)
metrica_vola21_LSTM_train21['Jarque-Bera p-value'] = pd.Series([jarque_bera_pval_LSTM_train21_vola21], index=metrica_vola21_LSTM_train21.index)
metrica_vola21_LSTM_train21
| SSE | MAPE | MAD | MSD | R2 | Ljung-Box p-value | Jarque-Bera p-value | |
|---|---|---|---|---|---|---|---|
| LSTM Entrenamiento Volatilidad ω = 21 y τ = 21 | 0.8763 | 5.2% | 0.0 | 0.0 | 95.33% | 0.4179 | 0.0 |
Conjunto de datos de Prueba:
# Residual diagnostics on the test split: Ljung-Box and Jarque-Bera p-values.
ljung_box_pvalLSTM21_vola21, jarque_bera_pvalLSTM21_vola21 = evaluate_residuals(data_test_plot_vola21_21, test_preds_vola21_LSTM21)
Se rechaza H0: hay autocorrelación en los residuales.
No se rechaza H0: los residuales siguen una distribución normal.
# Compute fit metrics (SSE, MAPE, MAD, MSD, R2) for the test split,
# relabel the single row, and append the residual-test p-values as columns.
metrica_LSTM_test21_vola21 = metricas(y_test_vola21_21,test_preds_vola21_LSTM21)
metrica_LSTM_test21_vola21.index = metrica_LSTM_test21_vola21.index.map({0: 'LSTM Prueba Volatilidad ω = 21 y τ = 21'})
metrica_LSTM_test21_vola21['Ljung-Box p-value'] = pd.Series([ljung_box_pvalLSTM21_vola21], index=metrica_LSTM_test21_vola21.index)
metrica_LSTM_test21_vola21['Jarque-Bera p-value'] = pd.Series([jarque_bera_pvalLSTM21_vola21], index=metrica_LSTM_test21_vola21.index)
metrica_LSTM_test21_vola21
| SSE | MAPE | MAD | MSD | R2 | Ljung-Box p-value | Jarque-Bera p-value | |
|---|---|---|---|---|---|---|---|
| LSTM Prueba Volatilidad ω = 21 y τ = 21 | 7.1796e-05 | 5.49% | 0.0 | 0.0 | 83.82% | 0.0079 | 0.6907 |
Horizonte de 28 días (\(\tau=28\))#
Indexación de parámetros para cambio de dimensión de X de 2D a 3D.
# Reshape the 2-D feature matrices into the 3-D (samples, timesteps, features)
# layout Keras LSTM layers expect: here (n, 28, 1) per the printed shapes.
X_train_vola21_lstm_28, X_val_vola21_lstm_28, X_test_vola21_lstm_28 = change_dimension_lstm(X_train_vola21_28, X_val_vola21_28, X_test_vola21_28)
Shape of 3D arrays X: (4887, 28, 1) (28, 28, 1) (28, 28, 1)
A continuación, se lleva a cabo la búsqueda de los hiperparámetros que optimizan nuestro modelo utilizando Keras y GridSearch.
from sklearn.metrics import make_scorer, mean_absolute_error
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # Suppress all TensorFlow INFO and WARNING log messages.
# Define the LSTM network architecture
def create_lstm_model(optimizer, activation):
    """Build and compile the two-layer LSTM used by the grid search.

    Parameters
    ----------
    optimizer : str or keras optimizer
        Optimizer passed to ``compile`` (e.g. 'SGD', 'RMSprop', 'Adam').
    activation : str
        Activation for the two LSTM layers. Bug fix: the original function
        accepted this argument but never used it, so the grid search over
        activations had no effect; it is now applied to both LSTM layers.

    Returns
    -------
    A compiled model: Input(28, 1) -> LSTM(64) -> LSTM(32) -> Dropout(0.2)
    -> Dense(1, linear), trained on mean absolute error.
    """
    input_layer_lstm = Input(shape=(28, 1), dtype='float32')
    # First LSTM returns the full sequence so it can feed a second LSTM.
    lstm_layer1 = LSTM(64, activation=activation, return_sequences=True)(input_layer_lstm)
    # Second LSTM emits only the last timestep's output.
    lstm_layer2 = LSTM(32, activation=activation, return_sequences=False)(lstm_layer1)
    dropout_layer_lstm = Dropout(0.2)(lstm_layer2)
    # Linear head: single-value regression output.
    output_layer_lstm = Dense(1, activation='linear')(dropout_layer_lstm)
    ts_model_lstm = Model(inputs=input_layer_lstm, outputs=output_layer_lstm)
    ts_model_lstm.compile(loss='mean_absolute_error', optimizer=optimizer)
    return ts_model_lstm
# Hyper-parameter search space. Extra candidates kept for reference:
# activation ['relu', 'tanh', 'sigmoid'], epochs [20, 50, 100, 150],
# optimizer ['SGD', 'RMSprop', 'Adam'].
param_grid = {
    'activation': ['relu'],
    'epochs': [20],
    'optimizer': ['SGD'],
}

# Grid search configuration: 5-fold shuffled CV scored with MAE, all cores.
mae_scorer = make_scorer(mean_absolute_error)
keras_reg = KerasRegressor(build_fn=create_lstm_model, epochs=10, batch_size=16, verbose=0)
grid_search = GridSearchCV(estimator=keras_reg, param_grid=param_grid,
                           cv=KFold(n_splits=5, shuffle=True),
                           scoring=mae_scorer, n_jobs=-1, verbose=2)
grid_result = grid_search.fit(X_train_vola21_lstm_28, y_train_vola21_28)

# Report the winning hyper-parameters and score.
print(f"Mejor función de activación: {grid_result.best_params_['activation']}")
print(f"Mejor número de epocas: {grid_result.best_params_['epochs']}")
print(f"Mejor Longitud de paso μ (optimizer): {grid_result.best_params_['optimizer']}")
print(f"Mejor Puntuación: {grid_result.best_score_}")
Fitting 5 folds for each of 1 candidates, totalling 5 fits
[CV] END ..........activation=relu, epochs=20, optimizer=SGD; total time= 45.9s
[CV] END ..........activation=relu, epochs=20, optimizer=SGD; total time= 46.0s
[CV] END ..........activation=relu, epochs=20, optimizer=SGD; total time= 46.7s
[CV] END ..........activation=relu, epochs=20, optimizer=SGD; total time= 46.8s
[CV] END ..........activation=relu, epochs=20, optimizer=SGD; total time= 46.9s
Mejor función de activación: relu
Mejor número de epocas: 20
Mejor Longitud de paso μ (optimizer): SGD
Mejor Puntuación: 0.017083894606823153
El modelo de predicción de series temporales se basa en una red neuronal recurrente que incluye una capa de entrada que se conecta a una capa LSTM. Esta capa LSTM considera 28 pasos temporales, equivalentes al número de datos históricos. La salida de la LSTM se genera únicamente a partir del último paso temporal. Cada uno de los n pasos temporales definidos en el shape (n,) cuenta con 32 neuronas ocultas.
La cantidad de pasos temporales (timesteps) y 1 indica el número de características (features) por cada paso temporal. Se utiliza return_sequences=True cuando es necesario enviar la secuencia completa de salidas a la capa siguiente, que puede ser otra capa recurrente, para este caso otra capa LSTM.
La salida de LSTM pasa a una capa de exclusión que elimina aleatoriamente el 20%, 40%, 60% y 80% de la entrada antes de pasar a la capa de salida, que tiene una única neurona oculta con una función de activación lineal
La indexación de parámetros de la función build_models_lstm se realiza con base en lo indicado en el numeral 3 del parcial práctico.
# Build the batch of LSTM models for the 28-day horizon: one model per
# (neuron count, dropout rate) combination, all compiled with SGD.
input_shape28 = 28
neurons_list = [10, 100, 1000, 10000]
dropout_rates = [0.2, 0.4, 0.6, 0.8]
models_LSTM28_vola21 = build_models_lstm(input_shape28, neurons_list, dropout_rates, 'SGD')
Model: "model_664"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_665 (InputLayer) [(None, 28, 1)] 0
lstm_648 (LSTM) (None, 28, 64) 16896
lstm_649 (LSTM) (None, 32) 12416
dropout_664 (Dropout) (None, 32) 0
dense_1684 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10 neuronas y dropout 0.2:
Model: "model_664"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_665 (InputLayer) [(None, 28, 1)] 0
lstm_648 (LSTM) (None, 28, 64) 16896
lstm_649 (LSTM) (None, 32) 12416
dropout_664 (Dropout) (None, 32) 0
dense_1684 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_665"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_666 (InputLayer) [(None, 28, 1)] 0
lstm_650 (LSTM) (None, 28, 64) 16896
lstm_651 (LSTM) (None, 32) 12416
dropout_665 (Dropout) (None, 32) 0
dense_1685 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10 neuronas y dropout 0.4:
Model: "model_665"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_666 (InputLayer) [(None, 28, 1)] 0
lstm_650 (LSTM) (None, 28, 64) 16896
lstm_651 (LSTM) (None, 32) 12416
dropout_665 (Dropout) (None, 32) 0
dense_1685 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_666"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_667 (InputLayer) [(None, 28, 1)] 0
lstm_652 (LSTM) (None, 28, 64) 16896
lstm_653 (LSTM) (None, 32) 12416
dropout_666 (Dropout) (None, 32) 0
dense_1686 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10 neuronas y dropout 0.6:
Model: "model_666"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_667 (InputLayer) [(None, 28, 1)] 0
lstm_652 (LSTM) (None, 28, 64) 16896
lstm_653 (LSTM) (None, 32) 12416
dropout_666 (Dropout) (None, 32) 0
dense_1686 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_667"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_668 (InputLayer) [(None, 28, 1)] 0
lstm_654 (LSTM) (None, 28, 64) 16896
lstm_655 (LSTM) (None, 32) 12416
dropout_667 (Dropout) (None, 32) 0
dense_1687 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10 neuronas y dropout 0.8:
Model: "model_667"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_668 (InputLayer) [(None, 28, 1)] 0
lstm_654 (LSTM) (None, 28, 64) 16896
lstm_655 (LSTM) (None, 32) 12416
dropout_667 (Dropout) (None, 32) 0
dense_1687 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_668"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_669 (InputLayer) [(None, 28, 1)] 0
lstm_656 (LSTM) (None, 28, 64) 16896
lstm_657 (LSTM) (None, 32) 12416
dropout_668 (Dropout) (None, 32) 0
dense_1688 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 100 neuronas y dropout 0.2:
Model: "model_668"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_669 (InputLayer) [(None, 28, 1)] 0
lstm_656 (LSTM) (None, 28, 64) 16896
lstm_657 (LSTM) (None, 32) 12416
dropout_668 (Dropout) (None, 32) 0
dense_1688 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_669"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_670 (InputLayer) [(None, 28, 1)] 0
lstm_658 (LSTM) (None, 28, 64) 16896
lstm_659 (LSTM) (None, 32) 12416
dropout_669 (Dropout) (None, 32) 0
dense_1689 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 100 neuronas y dropout 0.4:
Model: "model_669"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_670 (InputLayer) [(None, 28, 1)] 0
lstm_658 (LSTM) (None, 28, 64) 16896
lstm_659 (LSTM) (None, 32) 12416
dropout_669 (Dropout) (None, 32) 0
dense_1689 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_670"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_671 (InputLayer) [(None, 28, 1)] 0
lstm_660 (LSTM) (None, 28, 64) 16896
lstm_661 (LSTM) (None, 32) 12416
dropout_670 (Dropout) (None, 32) 0
dense_1690 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 100 neuronas y dropout 0.6:
Model: "model_670"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_671 (InputLayer) [(None, 28, 1)] 0
lstm_660 (LSTM) (None, 28, 64) 16896
lstm_661 (LSTM) (None, 32) 12416
dropout_670 (Dropout) (None, 32) 0
dense_1690 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_671"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_672 (InputLayer) [(None, 28, 1)] 0
lstm_662 (LSTM) (None, 28, 64) 16896
lstm_663 (LSTM) (None, 32) 12416
dropout_671 (Dropout) (None, 32) 0
dense_1691 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 100 neuronas y dropout 0.8:
Model: "model_671"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_672 (InputLayer) [(None, 28, 1)] 0
lstm_662 (LSTM) (None, 28, 64) 16896
lstm_663 (LSTM) (None, 32) 12416
dropout_671 (Dropout) (None, 32) 0
dense_1691 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_672"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_673 (InputLayer) [(None, 28, 1)] 0
lstm_664 (LSTM) (None, 28, 64) 16896
lstm_665 (LSTM) (None, 32) 12416
dropout_672 (Dropout) (None, 32) 0
dense_1692 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 1000 neuronas y dropout 0.2:
Model: "model_672"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_673 (InputLayer) [(None, 28, 1)] 0
lstm_664 (LSTM) (None, 28, 64) 16896
lstm_665 (LSTM) (None, 32) 12416
dropout_672 (Dropout) (None, 32) 0
dense_1692 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_673"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_674 (InputLayer) [(None, 28, 1)] 0
lstm_666 (LSTM) (None, 28, 64) 16896
lstm_667 (LSTM) (None, 32) 12416
dropout_673 (Dropout) (None, 32) 0
dense_1693 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 1000 neuronas y dropout 0.4:
Model: "model_673"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_674 (InputLayer) [(None, 28, 1)] 0
lstm_666 (LSTM) (None, 28, 64) 16896
lstm_667 (LSTM) (None, 32) 12416
dropout_673 (Dropout) (None, 32) 0
dense_1693 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_674"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_675 (InputLayer) [(None, 28, 1)] 0
lstm_668 (LSTM) (None, 28, 64) 16896
lstm_669 (LSTM) (None, 32) 12416
dropout_674 (Dropout) (None, 32) 0
dense_1694 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 1000 neuronas y dropout 0.6:
Model: "model_674"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_675 (InputLayer) [(None, 28, 1)] 0
lstm_668 (LSTM) (None, 28, 64) 16896
lstm_669 (LSTM) (None, 32) 12416
dropout_674 (Dropout) (None, 32) 0
dense_1694 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_675"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_676 (InputLayer) [(None, 28, 1)] 0
lstm_670 (LSTM) (None, 28, 64) 16896
lstm_671 (LSTM) (None, 32) 12416
dropout_675 (Dropout) (None, 32) 0
dense_1695 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 1000 neuronas y dropout 0.8:
Model: "model_675"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_676 (InputLayer) [(None, 28, 1)] 0
lstm_670 (LSTM) (None, 28, 64) 16896
lstm_671 (LSTM) (None, 32) 12416
dropout_675 (Dropout) (None, 32) 0
dense_1695 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_676"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_677 (InputLayer) [(None, 28, 1)] 0
lstm_672 (LSTM) (None, 28, 64) 16896
lstm_673 (LSTM) (None, 32) 12416
dropout_676 (Dropout) (None, 32) 0
dense_1696 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10000 neuronas y dropout 0.2:
Model: "model_676"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_677 (InputLayer) [(None, 28, 1)] 0
lstm_672 (LSTM) (None, 28, 64) 16896
lstm_673 (LSTM) (None, 32) 12416
dropout_676 (Dropout) (None, 32) 0
dense_1696 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_677"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_678 (InputLayer) [(None, 28, 1)] 0
lstm_674 (LSTM) (None, 28, 64) 16896
lstm_675 (LSTM) (None, 32) 12416
dropout_677 (Dropout) (None, 32) 0
dense_1697 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10000 neuronas y dropout 0.4:
Model: "model_677"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_678 (InputLayer) [(None, 28, 1)] 0
lstm_674 (LSTM) (None, 28, 64) 16896
lstm_675 (LSTM) (None, 32) 12416
dropout_677 (Dropout) (None, 32) 0
dense_1697 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_678"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_679 (InputLayer) [(None, 28, 1)] 0
lstm_676 (LSTM) (None, 28, 64) 16896
lstm_677 (LSTM) (None, 32) 12416
dropout_678 (Dropout) (None, 32) 0
dense_1698 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10000 neuronas y dropout 0.6:
Model: "model_678"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_679 (InputLayer) [(None, 28, 1)] 0
lstm_676 (LSTM) (None, 28, 64) 16896
lstm_677 (LSTM) (None, 32) 12416
dropout_678 (Dropout) (None, 32) 0
dense_1698 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_679"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_680 (InputLayer) [(None, 28, 1)] 0
lstm_678 (LSTM) (None, 28, 64) 16896
lstm_679 (LSTM) (None, 32) 12416
dropout_679 (Dropout) (None, 32) 0
dense_1699 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10000 neuronas y dropout 0.8:
Model: "model_679"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_680 (InputLayer) [(None, 28, 1)] 0
lstm_678 (LSTM) (None, 28, 64) 16896
lstm_679 (LSTM) (None, 32) 12416
dropout_679 (Dropout) (None, 32) 0
dense_1699 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Para entrenar el modelo, se utiliza la función fit() en el objeto del modelo, proporcionándole como argumentos X_train y y_train. El proceso de entrenamiento se lleva a cabo a lo largo de un número específico de épocas. Además, el parámetro batch_size especifica cuántas muestras del conjunto de entrenamiento se usarán en cada iteración de retropropagación (backpropagation).
El conjunto de datos de validación se utiliza para evaluar el rendimiento del modelo al finalizar cada época. Se emplea un objeto ModelCheckpoint que monitorea la función de pérdida en el conjunto de validación y almacena el modelo correspondiente a la época en la que esta función alcanza su valor más bajo.
Se define val_loss como el valor de la función de coste para los datos de validación cruzada y loss como el valor de la función de coste para los datos de entrenamiento. period=1 indica que el modelo se evaluará y potencialmente se guardará después de cada época.
from tensorflow.keras.callbacks import ModelCheckpoint
# Checkpoint template: the saved filename embeds the epoch number and the
# validation loss, e.g. "...weights.07-0.0010.keras".
save_weights = os.path.join('keras_models', 'PRSA_data_vola21_LSTM28_weights.{epoch:02d}-{val_loss:.4f}.keras')
# Save the full model (not just weights) after every epoch that lowers val_loss.
save_best28_lstm_vola21 = ModelCheckpoint(save_weights, monitor='val_loss', verbose=2,
save_best_only=True, save_weights_only=False, mode='min', save_freq='epoch');
A continuación se itera sobre cada uno de los modelos del objeto models_LSTM28_at.
import os
from joblib import dump, load

history_vola21_LSTM28 = []
# Train (or reload) each candidate LSTM model in models_LSTM28_vola21.
# Each model's training history is persisted to a joblib file so that
# re-running the notebook skips already-completed trainings.
for i, model in enumerate(models_LSTM28_vola21):
    filename = f'history_vola21_LSTM28_model_{i}.joblib'
    if os.path.exists(filename):
        # A previous run already trained this model: reuse its saved history.
        model_history = load(filename)
        # Fix: interpolate the actual filename (the message previously printed a literal placeholder).
        print(f"El archivo '{filename}' ya existe. Se ha cargado el historial del entrenamiento.")
    else:
        model_history = model.fit(x=X_train_vola21_lstm_28, y=y_train_vola21_28, batch_size=16, epochs=20,
                                  verbose=2, callbacks=[save_best28_lstm_vola21], validation_data=(X_val_vola21_lstm_28, y_val_vola21_28),
                                  shuffle=True)
        dump(model_history.history, filename)
        print(f"El entrenamiento del modelo {i + 1} se ha completado y el historial ha sido guardado en '{filename}'.")
    # Normalize to a plain dict: `load` returns the saved dict, while
    # `fit` returns a Keras History object whose .history holds the dict.
    history_vola21_LSTM28.append(model_history if isinstance(model_history, dict) else model_history.history)
El archivo 'history_vola21_LSTM28_model_0.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola21_LSTM28_model_1.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola21_LSTM28_model_2.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola21_LSTM28_model_3.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola21_LSTM28_model_4.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola21_LSTM28_model_5.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola21_LSTM28_model_6.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola21_LSTM28_model_7.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola21_LSTM28_model_8.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola21_LSTM28_model_9.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola21_LSTM28_model_10.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola21_LSTM28_model_11.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola21_LSTM28_model_12.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola21_LSTM28_model_13.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola21_LSTM28_model_14.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola21_LSTM28_model_15.joblib' ya existe. Se ha cargado el historial del entrenamiento.
En este caso, el parámetro verbose=2 indica que se mostrará una línea por cada época durante el entrenamiento. Los valores posibles son: 0 para no mostrar información, 1 para visualizar una barra de progreso y 2 para ver un registro por época.
Además, las muestras se mezclan de manera aleatoria antes de cada época, gracias a la opción shuffle=True.
Una vez completado el entrenamiento, se realizan predicciones sobre el Precio del Bitcoin utilizando los mejores modelos guardados. Las predicciones generadas, que corresponden al Precio del Bitcoin escalado, son transformadas inversamente para recuperar los valores originales. Asimismo, se evalúa la calidad del ajuste mediante el uso de métricas de medición como SSE, MAPE, MAD, MSD y R2.
import os
import re
from tensorflow.keras.models import load_model
import numpy as np

# Directory holding the per-epoch model checkpoints
model_dir = 'keras_models'
files = os.listdir(model_dir)
# Checkpoint filename pattern: group 1 is the epoch, group 2 the val_loss
pattern = r"PRSA_data_vola21_LSTM28_weights\.(\d+)-([\d\.]+)\.keras"
best_val_loss = float('inf')
best_model_file = None
best28_lstm_vola21 = None
# Scan the checkpoint directory for the file with the lowest validation loss.
for file in files:
    match = re.match(pattern, file)
    if match:
        # Only the val_loss encoded in the filename matters for selection
        # (the epoch capture group was previously extracted but never used).
        val_loss = float(match.group(2))
        if val_loss < best_val_loss:
            best_val_loss = val_loss
            best_model_file = file  # remember the best checkpoint so far
# Load the best checkpoint, if one was found.
if best_model_file:
    best_model_path = os.path.join(model_dir, best_model_file)
    print(f"Cargando el mejor modelo: {best_model_file} con val_loss: {best_val_loss}")
    best28_lstm_vola21 = load_model(best_model_path)  # full-model checkpoint
    if best28_lstm_vola21 is None:
        print("Error: No se pudo cargar el mejor modelo.")
else:
    print("No se encontraron archivos de modelos que coincidan con el patrón.")
Cargando el mejor modelo: PRSA_data_vola21_LSTM28_weights.18-0.0008.keras con val_loss: 0.0008
A continuación estimamos las predicciones del modelo LSTM para la época en donde la función de pérdida alcanza su valor más bajo.
# Generate predictions with the best checkpointed model, if it loaded.
if best28_lstm_vola21 is not None:
    # Predict and flatten in one step (predict returns (n, 1)-shaped arrays).
    train_preds_vola21_LSTM28 = np.squeeze(best28_lstm_vola21.predict(X_train_vola21_lstm_28))
    val_preds_vola21_LSTM28 = np.squeeze(best28_lstm_vola21.predict(X_val_vola21_lstm_28))
    test_preds_vola21_LSTM28 = np.squeeze(best28_lstm_vola21.predict(X_test_vola21_lstm_28))
    # Show the scaled predictions for each split
    print("Predicciones de Entrenamiento:", train_preds_vola21_LSTM28)
    print("Predicciones de validación:", val_preds_vola21_LSTM28)
    print("Predicciones de prueba:", test_preds_vola21_LSTM28)
else:
    print("No se puede realizar predicciones porque el mejor modelo no se ha cargado.")
153/153 [==============================] - 1s 3ms/step
1/1 [==============================] - 0s 17ms/step
1/1 [==============================] - 0s 11ms/step
Predicciones de Entrenamiento: [0.0009957 0.0009957 0.0009957 ... 0.02293545 0.02256175 0.0224879 ]
Predicciones de validación: [0.02251082 0.02081096 0.02097826 0.02258164 0.02357003 0.02277881
0.02275437 0.02215185 0.02568892 0.02607032 0.02526024 0.02530244
0.03043149 0.03052609 0.03073953 0.03084078 0.03051634 0.03019146
0.03079004 0.03076506 0.03075091 0.03074311 0.03062394 0.03037234
0.02893647 0.02790142 0.02999138 0.03012735]
Predicciones de prueba: [0.03010623 0.02664904 0.02639517 0.02627532 0.02649558 0.0208377
0.02072348 0.02011058 0.01974697 0.01959611 0.02045862 0.01914769
0.02061931 0.02070809 0.02060062 0.01702055 0.01726113 0.01850034
0.01832521 0.01662601 0.01713833 0.01696429 0.01670713 0.01625929
0.01621382 0.01677578 0.01730042 0.01717015]
# Plot the last 100 training points plus the validation/test series against the LSTM predictions
plot_model(data_train_plot_vola21_28[-100:], data_val_plot_vola21_28, data_test_plot_vola21_28, val_preds_vola21_LSTM28, test_preds_vola21_LSTM28, "Predicciones usando Memoria a Corto y Largo Paso (LSTM)")
A continuación realizamos un análisis sobre los residuales y rendimiento de nuestro modelo con relación a nuestro conjunto de Entrenamiento(rendimiento) y test (rendimiento y residuales).
Conjunto de datos de Entrenamiento
A continuación se realiza el análisis de los residuales para el conjunto de entrenamiento.
# Residual diagnostics on the training fit; returns Ljung-Box and Jarque-Bera p-values
ljung_box_pval_LSTM_train28_vola21, jarque_bera_pval_LSTM_train28_vola21 = diagnostic_plots(y_train_vola21_28, train_preds_vola21_LSTM28)
Ljung-Box LB Statistic: 25.459878
Ljung-Box p-value: 0.000000
Se rechaza H0: hay autocorrelación en los residuales.
Jarque-Bera p-value: 0.000000
Se rechaza H0: los residuales no siguen una distribución normal.
# Compute fit metrics (SSE, MAPE, MAD, MSD, R2) for the training set
metrica_vola21_LSTM_train28 = metricas(y_train_vola21_28,train_preds_vola21_LSTM28)
# Relabel the single row with a descriptive name
metrica_vola21_LSTM_train28.index = metrica_vola21_LSTM_train28.index.map({0: 'LSTM Entrenamiento Volatilidad ω = 21 y τ = 28'})
# Attach the residual-diagnostic p-values computed above
metrica_vola21_LSTM_train28['Ljung-Box p-value'] = pd.Series([ljung_box_pval_LSTM_train28_vola21], index=metrica_vola21_LSTM_train28.index)
metrica_vola21_LSTM_train28['Jarque-Bera p-value'] = pd.Series([jarque_bera_pval_LSTM_train28_vola21], index=metrica_vola21_LSTM_train28.index)
metrica_vola21_LSTM_train28
| SSE | MAPE | MAD | MSD | R2 | Ljung-Box p-value | Jarque-Bera p-value | |
|---|---|---|---|---|---|---|---|
| LSTM Entrenamiento Volatilidad ω = 21 y τ = 28 | 0.8814 | 5.44% | 0.0 | 0.0 | 95.3% | 4.5168e-07 | 0.0 |
Conjunto de datos de Prueba:
# Residual diagnostics (Ljung-Box, Jarque-Bera) on the test set
ljung_box_pvalLSTM28_vola21, jarque_bera_pvalLSTM28_vola21 = evaluate_residuals(data_test_plot_vola21_28, test_preds_vola21_LSTM28)
Se rechaza H0: hay autocorrelación en los residuales.
No se rechaza H0: los residuales siguen una distribución normal.
# Test-set fit metrics (SSE, MAPE, MAD, MSD, R2)
metrica_LSTM_test28_vola21 = metricas(y_test_vola21_28,test_preds_vola21_LSTM28)
# Relabel the single row with a descriptive name
metrica_LSTM_test28_vola21.index = metrica_LSTM_test28_vola21.index.map({0: 'LSTM Prueba Volatilidad ω = 21 y τ = 28'})
# Attach the residual-diagnostic p-values computed above
metrica_LSTM_test28_vola21['Ljung-Box p-value'] = pd.Series([ljung_box_pvalLSTM28_vola21], index=metrica_LSTM_test28_vola21.index)
metrica_LSTM_test28_vola21['Jarque-Bera p-value'] = pd.Series([jarque_bera_pvalLSTM28_vola21], index=metrica_LSTM_test28_vola21.index)
metrica_LSTM_test28_vola21
| SSE | MAPE | MAD | MSD | R2 | Ljung-Box p-value | Jarque-Bera p-value | |
|---|---|---|---|---|---|---|---|
| LSTM Prueba Volatilidad ω = 21 y τ = 28 | 7.6550e-05 | 5.21% | 0.0 | 0.0 | 75.75% | 4.2251e-06 | 0.2692 |
Curva Runs vs Error/Score :
# Validation-loss curve across runs for the trained LSTM models
plot_best_model_validation_loss(history_vola21_LSTM28)
Boxplot Errores Conjunto de Entrenamiento, Validación y Prueba :
# Boxplots of the errors for the train/validation/test splits
errores_plots(y_train_vola21_28, train_preds_vola21_LSTM28, y_val_vola21_28, val_preds_vola21_LSTM28, y_test_vola21_28, test_preds_vola21_LSTM28)
De acuerdo a los resultados obtenidos del gráfico de Run vs Error/Score y al compararlo con el gráfico boxplot de los residuales se observa lo siguiente:
Conjunto de Entrenamiento: La media de los residuales se encuentra alejada del error/score correspondiente al val_loss de la época del mejor modelo.
Conjunto de Validación: La media de los residuales se encuentra alejada del error/score correspondiente al val_loss de la época del mejor modelo.
Conjunto de Prueba: La media de los residuales se encuentra cercana al error/score correspondiente al val_loss de la época del mejor modelo.
Lo anterior es coherente con las métricas obtenidas para el modelo implementado.
Volatilidad ω = 28 (Volatilidad_28): Perceptrones Multicapa (MLP)#
Horizonte de 7 días (\(\tau=7\))#
A continuación, se lleva a cabo la búsqueda de los hiperparámetros que optimizan nuestro modelo utilizando Keras y GridSearch.
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # This suppresses all TensorFlow info and warning logs.
def create_mlp_model(activation='tanh', learning_rate=0.001):
    """Build and compile a small MLP regressor (7 inputs -> 32 -> 16 -> 16 -> 1).

    Parameters
    ----------
    activation : str
        Activation function for the three hidden Dense layers.
    learning_rate : float
        Step size for the legacy Adam optimizer.

    Returns
    -------
    A compiled Keras Model trained with mean-absolute-error loss.
    """
    inputs = Input(shape=(7,), dtype='float32')
    hidden = Dense(32, activation=activation)(inputs)
    hidden = Dense(16, activation=activation)(hidden)
    hidden = Dense(16, activation=activation)(hidden)
    regularized = Dropout(0.2)(hidden)  # dropout layer for regularization
    outputs = Dense(1, activation='linear')(regularized)
    mlp = Model(inputs=inputs, outputs=outputs)
    mlp.compile(loss='mean_absolute_error',
                optimizer=tf.keras.optimizers.legacy.Adam(learning_rate=learning_rate))
    return mlp
# Grid Search configuration: wrap the Keras builder so sklearn can cross-validate it
model = KerasRegressor(build_fn=create_mlp_model, epochs=20, batch_size=16, verbose=0)
param_grid = {
'activation': [ 'tanh'], # activation functions to try; full grid: ['relu', 'tanh', 'sigmoid']
'epochs' : [20], # epochs to try; full grid: [20, 50, 100, 200, 300]
'learning_rate' : [0.001] # learning rates to try; full grid: [0.001, 0.01, 0.1, 0.2, 0.3]
}
# 5-fold shuffled CV over the (here single-candidate) hyperparameter grid
grid = GridSearchCV(estimator=model, param_grid=param_grid, n_jobs=-1, cv=KFold(n_splits=5, shuffle=True), verbose =2)
# Fit the grid search on the training split
grid_result = grid.fit(X_train_vola28_7, y_train_vola28_7)
# Report the best hyperparameters found
print(f"Mejor función de activación: {grid_result.best_params_['activation']}")
print(f"Mejor número de epocas: {grid_result.best_params_['epochs']}")
print(f"Mejor Longitud de paso μ (Learning Rate): {grid_result.best_params_['learning_rate']}")
print(f"Mejor Puntuación: {grid_result.best_score_}")
Fitting 5 folds for each of 1 candidates, totalling 5 fits
[CV] END ....activation=tanh, epochs=20, learning_rate=0.001; total time= 5.2s
[CV] END ....activation=tanh, epochs=20, learning_rate=0.001; total time= 5.5s
[CV] END ....activation=tanh, epochs=20, learning_rate=0.001; total time= 5.7s
[CV] END ....activation=tanh, epochs=20, learning_rate=0.001; total time= 5.7s
[CV] END ....activation=tanh, epochs=20, learning_rate=0.001; total time= 6.6s
Mejor función de activación: tanh
Mejor número de epocas: 20
Mejor Longitud de paso μ (Learning Rate): 0.001
Mejor Puntuación: -0.0026480191387236117
A continuación se indexan los parámetros de la función build_models con base en lo indicado en el numeral 3 del parcial práctico.
# Model-grid parameters per item 3 of the practical exam:
# input window of 7 lags, four neuron counts x four dropout rates.
input_shape7 = 7
neurons_list = [10, 100, 1000, 10000]
dropout_rates = [0.2, 0.4, 0.6, 0.8]
models_MLP7_vola28 = build_models_mlp(input_shape7, neurons_list, dropout_rates, 'tanh')
Modelo con 10 neuronas y dropout 0.2:
Model: "model_681"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_682 (InputLayer) [(None, 7)] 0
dense_1704 (Dense) (None, 32) 256
dense_1705 (Dense) (None, 16) 528
dense_1706 (Dense) (None, 16) 272
dropout_681 (Dropout) (None, 16) 0
dense_1707 (Dense) (None, 1) 17
=================================================================
Total params: 1,073
Trainable params: 1,073
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10 neuronas y dropout 0.4:
Model: "model_682"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_683 (InputLayer) [(None, 7)] 0
dense_1708 (Dense) (None, 32) 256
dense_1709 (Dense) (None, 16) 528
dense_1710 (Dense) (None, 16) 272
dropout_682 (Dropout) (None, 16) 0
dense_1711 (Dense) (None, 1) 17
=================================================================
Total params: 1,073
Trainable params: 1,073
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10 neuronas y dropout 0.6:
Model: "model_683"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_684 (InputLayer) [(None, 7)] 0
dense_1712 (Dense) (None, 32) 256
dense_1713 (Dense) (None, 16) 528
dense_1714 (Dense) (None, 16) 272
dropout_683 (Dropout) (None, 16) 0
dense_1715 (Dense) (None, 1) 17
=================================================================
Total params: 1,073
Trainable params: 1,073
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10 neuronas y dropout 0.8:
Model: "model_684"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_685 (InputLayer) [(None, 7)] 0
dense_1716 (Dense) (None, 32) 256
dense_1717 (Dense) (None, 16) 528
dense_1718 (Dense) (None, 16) 272
dropout_684 (Dropout) (None, 16) 0
dense_1719 (Dense) (None, 1) 17
=================================================================
Total params: 1,073
Trainable params: 1,073
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 100 neuronas y dropout 0.2:
Model: "model_685"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_686 (InputLayer) [(None, 7)] 0
dense_1720 (Dense) (None, 32) 256
dense_1721 (Dense) (None, 16) 528
dense_1722 (Dense) (None, 16) 272
dropout_685 (Dropout) (None, 16) 0
dense_1723 (Dense) (None, 1) 17
=================================================================
Total params: 1,073
Trainable params: 1,073
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 100 neuronas y dropout 0.4:
Model: "model_686"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_687 (InputLayer) [(None, 7)] 0
dense_1724 (Dense) (None, 32) 256
dense_1725 (Dense) (None, 16) 528
dense_1726 (Dense) (None, 16) 272
dropout_686 (Dropout) (None, 16) 0
dense_1727 (Dense) (None, 1) 17
=================================================================
Total params: 1,073
Trainable params: 1,073
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 100 neuronas y dropout 0.6:
Model: "model_687"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_688 (InputLayer) [(None, 7)] 0
dense_1728 (Dense) (None, 32) 256
dense_1729 (Dense) (None, 16) 528
dense_1730 (Dense) (None, 16) 272
dropout_687 (Dropout) (None, 16) 0
dense_1731 (Dense) (None, 1) 17
=================================================================
Total params: 1,073
Trainable params: 1,073
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 100 neuronas y dropout 0.8:
Model: "model_688"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_689 (InputLayer) [(None, 7)] 0
dense_1732 (Dense) (None, 32) 256
dense_1733 (Dense) (None, 16) 528
dense_1734 (Dense) (None, 16) 272
dropout_688 (Dropout) (None, 16) 0
dense_1735 (Dense) (None, 1) 17
=================================================================
Total params: 1,073
Trainable params: 1,073
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 1000 neuronas y dropout 0.2:
Model: "model_689"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_690 (InputLayer) [(None, 7)] 0
dense_1736 (Dense) (None, 32) 256
dense_1737 (Dense) (None, 16) 528
dense_1738 (Dense) (None, 16) 272
dropout_689 (Dropout) (None, 16) 0
dense_1739 (Dense) (None, 1) 17
=================================================================
Total params: 1,073
Trainable params: 1,073
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 1000 neuronas y dropout 0.4:
Model: "model_690"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_691 (InputLayer) [(None, 7)] 0
dense_1740 (Dense) (None, 32) 256
dense_1741 (Dense) (None, 16) 528
dense_1742 (Dense) (None, 16) 272
dropout_690 (Dropout) (None, 16) 0
dense_1743 (Dense) (None, 1) 17
=================================================================
Total params: 1,073
Trainable params: 1,073
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 1000 neuronas y dropout 0.6:
Model: "model_691"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_692 (InputLayer) [(None, 7)] 0
dense_1744 (Dense) (None, 32) 256
dense_1745 (Dense) (None, 16) 528
dense_1746 (Dense) (None, 16) 272
dropout_691 (Dropout) (None, 16) 0
dense_1747 (Dense) (None, 1) 17
=================================================================
Total params: 1,073
Trainable params: 1,073
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 1000 neuronas y dropout 0.8:
Model: "model_692"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_693 (InputLayer) [(None, 7)] 0
dense_1748 (Dense) (None, 32) 256
dense_1749 (Dense) (None, 16) 528
dense_1750 (Dense) (None, 16) 272
dropout_692 (Dropout) (None, 16) 0
dense_1751 (Dense) (None, 1) 17
=================================================================
Total params: 1,073
Trainable params: 1,073
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10000 neuronas y dropout 0.2:
Model: "model_693"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_694 (InputLayer) [(None, 7)] 0
dense_1752 (Dense) (None, 32) 256
dense_1753 (Dense) (None, 16) 528
dense_1754 (Dense) (None, 16) 272
dropout_693 (Dropout) (None, 16) 0
dense_1755 (Dense) (None, 1) 17
=================================================================
Total params: 1,073
Trainable params: 1,073
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10000 neuronas y dropout 0.4:
Model: "model_694"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_695 (InputLayer) [(None, 7)] 0
dense_1756 (Dense) (None, 32) 256
dense_1757 (Dense) (None, 16) 528
dense_1758 (Dense) (None, 16) 272
dropout_694 (Dropout) (None, 16) 0
dense_1759 (Dense) (None, 1) 17
=================================================================
Total params: 1,073
Trainable params: 1,073
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10000 neuronas y dropout 0.6:
Model: "model_695"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_696 (InputLayer) [(None, 7)] 0
dense_1760 (Dense) (None, 32) 256
dense_1761 (Dense) (None, 16) 528
dense_1762 (Dense) (None, 16) 272
dropout_695 (Dropout) (None, 16) 0
dense_1763 (Dense) (None, 1) 17
=================================================================
Total params: 1,073
Trainable params: 1,073
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10000 neuronas y dropout 0.8:
Model: "model_696"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_697 (InputLayer) [(None, 7)] 0
dense_1764 (Dense) (None, 32) 256
dense_1765 (Dense) (None, 16) 528
dense_1766 (Dense) (None, 16) 272
dropout_696 (Dropout) (None, 16) 0
dense_1767 (Dense) (None, 1) 17
=================================================================
Total params: 1,073
Trainable params: 1,073
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Para entrenar el modelo, se utiliza la función fit() en el objeto del modelo, proporcionándole como argumentos X_train y y_train. El proceso de entrenamiento se lleva a cabo a lo largo de un número específico de épocas. Además, el parámetro batch_size especifica cuántas muestras del conjunto de entrenamiento se usarán en cada iteración de retropropagación (backpropagation).
El conjunto de datos de validación se utiliza para evaluar el rendimiento del modelo al finalizar cada época. Se emplea un objeto ModelCheckpoint que monitorea la función de pérdida en el conjunto de validación y almacena el modelo correspondiente a la época en la que esta función alcanza su valor más bajo.
Se define val_loss como el valor de la función de coste para los datos de validación cruzada y loss como el valor de la función de coste para los datos de entrenamiento. period=1 indica que el modelo se evaluará y potencialmente se guardará después de cada época.
from tensorflow.keras.callbacks import ModelCheckpoint
# Checkpoint template: the saved filename embeds the epoch number and the
# validation loss, e.g. "...weights.07-0.0010.keras".
save_weights = os.path.join('keras_models', 'PRSA_data_vola28_MLP7_weights.{epoch:02d}-{val_loss:.4f}.keras')
# Save the full model (not just weights) after every epoch that lowers val_loss.
save_best7_vola28 = ModelCheckpoint(save_weights, monitor='val_loss', verbose=2,
save_best_only=True, save_weights_only=False, mode='min', save_freq='epoch');
A continuación se itera sobre cada uno de los modelos del objeto models_MLP7.
import os
from joblib import dump, load

history_vola28_MPL7 = []
# Train (or reload) each candidate MLP model in models_MLP7_vola28.
# Each model's training history is persisted to a joblib file so that
# re-running the notebook skips already-completed trainings.
for i, model in enumerate(models_MLP7_vola28):
    filename = f'history_vola28_MPL7_model_{i}.joblib'
    if os.path.exists(filename):
        # A previous run already trained this model: reuse its saved history.
        model_history = load(filename)
        # Fix: interpolate the actual filename (the message previously printed a literal placeholder).
        print(f"El archivo '{filename}' ya existe. Se ha cargado el historial del entrenamiento.")
    else:
        model_history = model.fit(x=X_train_vola28_7, y=y_train_vola28_7, batch_size=16, epochs=20,
                                  verbose=2, callbacks=[save_best7_vola28], validation_data=(X_val_vola28_7, y_val_vola28_7),
                                  shuffle=True)
        dump(model_history.history, filename)
        print(f"El entrenamiento del modelo {i + 1} se ha completado y el historial ha sido guardado en '{filename}'.")
    # Normalize to a plain dict: `load` returns the saved dict, while
    # `fit` returns a Keras History object whose .history holds the dict.
    history_vola28_MPL7.append(model_history if isinstance(model_history, dict) else model_history.history)
El archivo 'history_vola28_MPL7_model_0.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola28_MPL7_model_1.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola28_MPL7_model_2.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola28_MPL7_model_3.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola28_MPL7_model_4.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola28_MPL7_model_5.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola28_MPL7_model_6.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola28_MPL7_model_7.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola28_MPL7_model_8.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola28_MPL7_model_9.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola28_MPL7_model_10.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola28_MPL7_model_11.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola28_MPL7_model_12.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola28_MPL7_model_13.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola28_MPL7_model_14.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola28_MPL7_model_15.joblib' ya existe. Se ha cargado el historial del entrenamiento.
En este caso, el parámetro verbose=2 indica que se mostrará una línea por cada época durante el entrenamiento. Los valores posibles son: 0 para no mostrar información, 1 para visualizar una barra de progreso y 2 para ver un registro por época.
Además, las muestras se mezclan de manera aleatoria antes de cada época, gracias a la opción shuffle=True.
Una vez completado el entrenamiento, se realizan predicciones sobre el Precio del Bitcoin utilizando los mejores modelos guardados. Las predicciones generadas, que corresponden al Precio del Bitcoin escalado, son transformadas inversamente para recuperar los valores originales. Asimismo, se evalúa la calidad del ajuste mediante el uso de métricas de medición como SSE, MAPE, MAD, MSD y R2.
import os
import re
from tensorflow.keras.models import load_model
import numpy as np

# Directory holding the per-epoch model checkpoints
model_dir = 'keras_models'
files = os.listdir(model_dir)
# Checkpoint filename pattern: group 1 is the epoch, group 2 the val_loss
pattern = r"PRSA_data_vola28_MLP7_weights\.(\d+)-([\d\.]+)\.keras"
best_val_loss = float('inf')
best_model_file = None
best_model7_vola28 = None
# Scan the checkpoint directory for the file with the lowest validation loss.
for file in files:
    match = re.match(pattern, file)
    if match:
        # Only the val_loss encoded in the filename matters for selection
        # (the epoch capture group was previously extracted but never used).
        val_loss = float(match.group(2))
        if val_loss < best_val_loss:
            best_val_loss = val_loss
            best_model_file = file  # remember the best checkpoint so far
# Load the best checkpoint, if one was found.
if best_model_file:
    best_model_path = os.path.join(model_dir, best_model_file)
    print(f"Cargando el mejor modelo: {best_model_file} con val_loss: {best_val_loss}")
    best_model7_vola28 = load_model(best_model_path)  # full-model checkpoint
    if best_model7_vola28 is None:
        print("Error: No se pudo cargar el mejor modelo.")
else:
    print("No se encontraron archivos de modelos que coincidan con el patrón.")
Cargando el mejor modelo: PRSA_data_vola28_MLP7_weights.07-0.0010.keras con val_loss: 0.001
A continuación estimamos las predicciones del modelo MLP para la época en donde la función de pérdida alcanza su valor más bajo.
# Generate predictions with the best checkpointed model, if it loaded.
if best_model7_vola28 is not None:
    # Predict and flatten in one step (predict returns (n, 1)-shaped arrays).
    train_preds_vola28_MLP7 = np.squeeze(best_model7_vola28.predict(X_train_vola28_7))
    val_preds_vola28_MLP7 = np.squeeze(best_model7_vola28.predict(X_val_vola28_7))
    test_preds_vola28_MLP7 = np.squeeze(best_model7_vola28.predict(X_test_vola28_7))
    # Show the scaled predictions for each split
    print("Predicciones de Entrenamiento:", train_preds_vola28_MLP7)
    print("Predicciones de validación:", val_preds_vola28_MLP7)
    print("Predicciones de prueba:", test_preds_vola28_MLP7)
else:
    print("No se puede realizar predicciones porque el mejor modelo no se ha cargado.")
156/156 [==============================] - 0s 857us/step
1/1 [==============================] - 0s 11ms/step
1/1 [==============================] - 0s 10ms/step
Predicciones de Entrenamiento: [0.00255071 0.00255071 0.00255071 ... 0.02623877 0.02583994 0.02579919]
Predicciones de validación: [0.02548368 0.02780309 0.03172392 0.03231183 0.0319136 0.03159869
0.03161928]
Predicciones de prueba: [0.03159535 0.03178523 0.03192443 0.03155926 0.03203966 0.03288624
0.03500837]
A continuación se indexan los parámetros sobre la función data_plot() para un horizonte de 7 días (\(\tau = 7\)).
# Split the ω=28 volatility series into train/validation/test plot segments for a 7-day horizon
data_train_plot_vola28_7, data_val_plot_vola28_7, data_test_plot_vola28_7 = data_plot(df_1_st['Volatilidad_28'], 7)
Datos de entrenamiento:
4998 0.0000
4997 0.0000
4996 0.0000
4995 0.0000
4994 0.0000
...
18 0.0320
17 0.0319
16 0.0315
15 0.0316
14 0.0316
Name: Volatilidad_28, Length: 4985, dtype: float64
Datos de validación:
13 0.0320
12 0.0321
11 0.0316
10 0.0323
9 0.0331
8 0.0355
7 0.0362
Name: Volatilidad_28, dtype: float64
Datos de prueba:
6 0.0363
5 0.0403
4 0.0434
3 0.0440
2 0.0444
1 0.0444
0 0.0450
Name: Volatilidad_28, dtype: float64
# Plot the last 100 training points plus the validation/test series against the MLP predictions
plot_model(data_train_plot_vola28_7[-100:], data_val_plot_vola28_7, data_test_plot_vola28_7, val_preds_vola28_MLP7, test_preds_vola28_MLP7, "Predicciones usando Perceptrón Multicapa (MLP)")
A continuación realizamos un análisis sobre los residuales y rendimiento de nuestro modelo con relación a nuestro conjunto de Entrenamiento(rendimiento) y test (rendimiento y residuales).
Conjunto de datos de Entrenamiento
A continuación se realiza el análisis de los residuales para el conjunto de entrenamiento.
# Residual diagnostics on the training fit; returns Ljung-Box and Jarque-Bera p-values
ljung_box_pval_MLP_train7_vola28, jarque_bera_pval_MLP_train7_vola28 = diagnostic_plots(y_train_vola28_7, train_preds_vola28_MLP7)
Ljung-Box LB Statistic: 1238.240659
Ljung-Box p-value: 0.000000
Se rechaza H0: hay autocorrelación en los residuales.
Jarque-Bera p-value: 0.000000
Se rechaza H0: los residuales no siguen una distribución normal.
# Compute the error metrics for the training fit.
metrica_vola28_MLP_train = metricas(y_train_vola28_7,train_preds_vola28_MLP7)
# Give the single metrics row a descriptive label.
metrica_vola28_MLP_train.index = metrica_vola28_MLP_train.index.map({0: 'MLP Entrenamiento Volatilidad ω = 28 y τ = 7'})
# Append the residual-diagnostics p-values as extra columns.
metrica_vola28_MLP_train['Ljung-Box p-value'] = pd.Series([ljung_box_pval_MLP_train7_vola28], index=metrica_vola28_MLP_train.index)
metrica_vola28_MLP_train['Jarque-Bera p-value'] = pd.Series([jarque_bera_pval_MLP_train7_vola28], index=metrica_vola28_MLP_train.index)
# Display the metrics table (notebook cell output).
metrica_vola28_MLP_train
| SSE | MAPE | MAD | MSD | R2 | Ljung-Box p-value | Jarque-Bera p-value | |
|---|---|---|---|---|---|---|---|
| MLP Entrenamiento Volatilidad ω = 28 y τ = 7 | 1.0324 | 5.97% | 0.0 | 0.0 | 94.36% | 2.9829e-271 | 0.0 |
Conjunto de datos de Prueba:
# Residual diagnostics on the test-set predictions (Ljung-Box and Jarque-Bera p-values).
ljung_box_pvalMLP7_vola28, jarque_bera_pvalMLP7_vola28 = evaluate_residuals(data_test_plot_vola28_7, test_preds_vola28_MLP7)
No se rechaza H0: los residuales son independientes (no correlacionados).
No se rechaza H0: los residuales siguen una distribución normal.
# Compute the error metrics for the test-set predictions.
metrica_MLP7_test_vola28 = metricas(y_test_vola28_7,test_preds_vola28_MLP7)
# Label the metrics row and append the residual-diagnostics p-values.
metrica_MLP7_test_vola28.index = metrica_MLP7_test_vola28.index.map({0: 'MLP Prueba Volatilidad ω = 28 y τ = 7'})
metrica_MLP7_test_vola28['Ljung-Box p-value'] = pd.Series([ljung_box_pvalMLP7_vola28], index=metrica_MLP7_test_vola28.index)
metrica_MLP7_test_vola28['Jarque-Bera p-value'] = pd.Series([jarque_bera_pvalMLP7_vola28], index=metrica_MLP7_test_vola28.index)
# Display the metrics table (notebook cell output).
metrica_MLP7_test_vola28
| SSE | MAPE | MAD | MSD | R2 | Ljung-Box p-value | Jarque-Bera p-value | |
|---|---|---|---|---|---|---|---|
| MLP Prueba Volatilidad ω = 28 y τ = 7 | 1.0452e-05 | 2.75% | 0.0 | 0.0 | 49.69% | 0.2109 | 0.4604 |
Curva Runs vs Error/Score :
# Plot validation loss per run/epoch for the trained models (tau = 7).
plot_best_model_validation_loss(history_vola28_MPL7)
Boxplot Errores Conjunto de Entrenamiento, Validación y Prueba :
# Boxplots of the residuals on the train/validation/test splits (tau = 7).
errores_plots(y_train_vola28_7, train_preds_vola28_MLP7, y_val_vola28_7, val_preds_vola28_MLP7, y_test_vola28_7, test_preds_vola28_MLP7)
De acuerdo a los resultados obtenidos del gráfico de Run vs Error/Score y al compararlo con el gráfico boxplot de los residuales se observa lo siguiente:
Conjunto de Entrenamiento: La media de los residuales se encuentra muy cercana del error/score correspondiente al val_loss de la época del mejor modelo.
Conjunto de Validación: La media de los residuales se encuentra alejada del error/score correspondiente al val_loss de la época del mejor modelo.
Conjunto de Prueba: La media de los residuales se encuentra alejada del error/score correspondiente al val_loss de la época del mejor modelo.
Lo anterior es coherente con las métricas obtenidas para el modelo implementado.
Horizonte de 14 días (\(\tau=14\))#
A continuación, se lleva a cabo la búsqueda de los hiperparámetros que optimizan nuestro modelo utilizando Keras y GridSearch.
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'  # Suppress TensorFlow info/warning logs.

def create_mlp_model(activation='tanh', learning_rate=0.001):
    """Build and compile the MLP used for the 14-day horizon.

    Architecture: 14-dim input -> Dense(32) -> Dense(16) -> Dense(16)
    -> Dropout(0.2) -> single linear output, trained with MAE loss and
    a (legacy) Adam optimizer at the given learning rate.
    """
    inputs = Input(shape=(14,), dtype='float32')
    hidden = inputs
    for width in (32, 16, 16):
        hidden = Dense(width, activation=activation)(hidden)
    hidden = Dropout(0.2)(hidden)  # regularization before the output head
    outputs = Dense(1, activation='linear')(hidden)
    mlp = Model(inputs=inputs, outputs=outputs)
    mlp.compile(loss='mean_absolute_error',
                optimizer=tf.keras.optimizers.legacy.Adam(learning_rate=learning_rate))
    return mlp
# Grid-search configuration over the MLP hyper-parameters (tau = 14).
model = KerasRegressor(build_fn=create_mlp_model, epochs=20, batch_size=16, verbose=0)

# Candidate values; wider sweeps tried previously are noted inline.
param_grid = {
    'activation': ['tanh'],    # e.g. ['relu', 'tanh', 'sigmoid']
    'epochs': [50],            # e.g. [20, 50, 100, 200, 300]
    'learning_rate': [0.001],  # e.g. [0.001, 0.01, 0.1, 0.2, 0.3]
}

grid = GridSearchCV(estimator=model, param_grid=param_grid, n_jobs=-1,
                    cv=KFold(n_splits=5, shuffle=True), verbose=2)

# Fit the grid search on the 14-step-ahead training data.
grid_result = grid.fit(X_train_vola28_14, y_train_vola28_14)

# Report the winning configuration.
print(f"Mejor función de activación: {grid_result.best_params_['activation']}")
print(f"Mejor número de epocas: {grid_result.best_params_['epochs']}")
print(f"Mejor Longitud de paso μ (Learning Rate): {grid_result.best_params_['learning_rate']}")
print(f"Mejor Puntuación: {grid_result.best_score_}")
Fitting 5 folds for each of 1 candidates, totalling 5 fits
[CV] END ....activation=tanh, epochs=50, learning_rate=0.001; total time= 11.4s
[CV] END ....activation=tanh, epochs=50, learning_rate=0.001; total time= 11.3s
[CV] END ....activation=tanh, epochs=50, learning_rate=0.001; total time= 11.4s
[CV] END ....activation=tanh, epochs=50, learning_rate=0.001; total time= 11.4s
[CV] END ....activation=tanh, epochs=50, learning_rate=0.001; total time= 11.5s
Mejor función de activación: tanh
Mejor número de epocas: 50
Mejor Longitud de paso μ (Learning Rate): 0.001
Mejor Puntuación: -0.003689728630706668
A continuación, configuramos la red MLP empleando la API funcional de Keras. Con este método, una capa puede ser designada como la entrada para la siguiente capa en el momento de su definición.
En este contexto, Input es una función utilizada para establecer una capa de entrada en un modelo de red neuronal. La propiedad shape=(14,) define la estructura de los datos de entrada, lo que indica que estos tendrán 14 dimensiones. Por otro lado, dtype='float32' especifica que los elementos de esta capa serán números de punto flotante de 32 bits
A continuación se define la función build_models_mlp para la generación de modelos de acuerdo a los parámetros indexados a través de la lista.
La indexación de parámetros de la función build_models_mlp se realiza con base en lo indicado en el numeral 3 del parcial práctico.
# Hyper-parameter sweep requested in item 3 of the practical exam:
# build one model per (neurons, dropout) combination.
input_shape14 = 14  # input window length (tau = 14)
neurons_list = [10, 100, 1000, 10000]  # candidate layer widths
dropout_rates = [0.2, 0.4, 0.6, 0.8]  # candidate dropout probabilities
# NOTE(review): the printed summaries below show an identical architecture
# (1,297 params) for every neurons value, so build_models_mlp may be
# ignoring neurons_list — verify its definition.
models_MLP14_vola28 = build_models_mlp(input_shape14, neurons_list, dropout_rates, 'tanh')
Modelo con 10 neuronas y dropout 0.2:
Model: "model_698"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_699 (InputLayer) [(None, 14)] 0
dense_1772 (Dense) (None, 32) 480
dense_1773 (Dense) (None, 16) 528
dense_1774 (Dense) (None, 16) 272
dropout_698 (Dropout) (None, 16) 0
dense_1775 (Dense) (None, 1) 17
=================================================================
Total params: 1,297
Trainable params: 1,297
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10 neuronas y dropout 0.4:
Model: "model_699"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_700 (InputLayer) [(None, 14)] 0
dense_1776 (Dense) (None, 32) 480
dense_1777 (Dense) (None, 16) 528
dense_1778 (Dense) (None, 16) 272
dropout_699 (Dropout) (None, 16) 0
dense_1779 (Dense) (None, 1) 17
=================================================================
Total params: 1,297
Trainable params: 1,297
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10 neuronas y dropout 0.6:
Model: "model_700"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_701 (InputLayer) [(None, 14)] 0
dense_1780 (Dense) (None, 32) 480
dense_1781 (Dense) (None, 16) 528
dense_1782 (Dense) (None, 16) 272
dropout_700 (Dropout) (None, 16) 0
dense_1783 (Dense) (None, 1) 17
=================================================================
Total params: 1,297
Trainable params: 1,297
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10 neuronas y dropout 0.8:
Model: "model_701"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_702 (InputLayer) [(None, 14)] 0
dense_1784 (Dense) (None, 32) 480
dense_1785 (Dense) (None, 16) 528
dense_1786 (Dense) (None, 16) 272
dropout_701 (Dropout) (None, 16) 0
dense_1787 (Dense) (None, 1) 17
=================================================================
Total params: 1,297
Trainable params: 1,297
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 100 neuronas y dropout 0.2:
Model: "model_702"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_703 (InputLayer) [(None, 14)] 0
dense_1788 (Dense) (None, 32) 480
dense_1789 (Dense) (None, 16) 528
dense_1790 (Dense) (None, 16) 272
dropout_702 (Dropout) (None, 16) 0
dense_1791 (Dense) (None, 1) 17
=================================================================
Total params: 1,297
Trainable params: 1,297
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 100 neuronas y dropout 0.4:
Model: "model_703"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_704 (InputLayer) [(None, 14)] 0
dense_1792 (Dense) (None, 32) 480
dense_1793 (Dense) (None, 16) 528
dense_1794 (Dense) (None, 16) 272
dropout_703 (Dropout) (None, 16) 0
dense_1795 (Dense) (None, 1) 17
=================================================================
Total params: 1,297
Trainable params: 1,297
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 100 neuronas y dropout 0.6:
Model: "model_704"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_705 (InputLayer) [(None, 14)] 0
dense_1796 (Dense) (None, 32) 480
dense_1797 (Dense) (None, 16) 528
dense_1798 (Dense) (None, 16) 272
dropout_704 (Dropout) (None, 16) 0
dense_1799 (Dense) (None, 1) 17
=================================================================
Total params: 1,297
Trainable params: 1,297
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 100 neuronas y dropout 0.8:
Model: "model_705"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_706 (InputLayer) [(None, 14)] 0
dense_1800 (Dense) (None, 32) 480
dense_1801 (Dense) (None, 16) 528
dense_1802 (Dense) (None, 16) 272
dropout_705 (Dropout) (None, 16) 0
dense_1803 (Dense) (None, 1) 17
=================================================================
Total params: 1,297
Trainable params: 1,297
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 1000 neuronas y dropout 0.2:
Model: "model_706"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_707 (InputLayer) [(None, 14)] 0
dense_1804 (Dense) (None, 32) 480
dense_1805 (Dense) (None, 16) 528
dense_1806 (Dense) (None, 16) 272
dropout_706 (Dropout) (None, 16) 0
dense_1807 (Dense) (None, 1) 17
=================================================================
Total params: 1,297
Trainable params: 1,297
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 1000 neuronas y dropout 0.4:
Model: "model_707"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_708 (InputLayer) [(None, 14)] 0
dense_1808 (Dense) (None, 32) 480
dense_1809 (Dense) (None, 16) 528
dense_1810 (Dense) (None, 16) 272
dropout_707 (Dropout) (None, 16) 0
dense_1811 (Dense) (None, 1) 17
=================================================================
Total params: 1,297
Trainable params: 1,297
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 1000 neuronas y dropout 0.6:
Model: "model_708"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_709 (InputLayer) [(None, 14)] 0
dense_1812 (Dense) (None, 32) 480
dense_1813 (Dense) (None, 16) 528
dense_1814 (Dense) (None, 16) 272
dropout_708 (Dropout) (None, 16) 0
dense_1815 (Dense) (None, 1) 17
=================================================================
Total params: 1,297
Trainable params: 1,297
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 1000 neuronas y dropout 0.8:
Model: "model_709"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_710 (InputLayer) [(None, 14)] 0
dense_1816 (Dense) (None, 32) 480
dense_1817 (Dense) (None, 16) 528
dense_1818 (Dense) (None, 16) 272
dropout_709 (Dropout) (None, 16) 0
dense_1819 (Dense) (None, 1) 17
=================================================================
Total params: 1,297
Trainable params: 1,297
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10000 neuronas y dropout 0.2:
Model: "model_710"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_711 (InputLayer) [(None, 14)] 0
dense_1820 (Dense) (None, 32) 480
dense_1821 (Dense) (None, 16) 528
dense_1822 (Dense) (None, 16) 272
dropout_710 (Dropout) (None, 16) 0
dense_1823 (Dense) (None, 1) 17
=================================================================
Total params: 1,297
Trainable params: 1,297
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10000 neuronas y dropout 0.4:
Model: "model_711"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_712 (InputLayer) [(None, 14)] 0
dense_1824 (Dense) (None, 32) 480
dense_1825 (Dense) (None, 16) 528
dense_1826 (Dense) (None, 16) 272
dropout_711 (Dropout) (None, 16) 0
dense_1827 (Dense) (None, 1) 17
=================================================================
Total params: 1,297
Trainable params: 1,297
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10000 neuronas y dropout 0.6:
Model: "model_712"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_713 (InputLayer) [(None, 14)] 0
dense_1828 (Dense) (None, 32) 480
dense_1829 (Dense) (None, 16) 528
dense_1830 (Dense) (None, 16) 272
dropout_712 (Dropout) (None, 16) 0
dense_1831 (Dense) (None, 1) 17
=================================================================
Total params: 1,297
Trainable params: 1,297
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10000 neuronas y dropout 0.8:
Model: "model_713"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_714 (InputLayer) [(None, 14)] 0
dense_1832 (Dense) (None, 32) 480
dense_1833 (Dense) (None, 16) 528
dense_1834 (Dense) (None, 16) 272
dropout_713 (Dropout) (None, 16) 0
dense_1835 (Dense) (None, 1) 17
=================================================================
Total params: 1,297
Trainable params: 1,297
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Para entrenar el modelo, se utiliza la función fit() en el objeto del modelo, proporcionándole como argumentos X_train y y_train. El proceso de entrenamiento se lleva a cabo a lo largo de un número específico de épocas. Además, el parámetro batch_size especifica cuántas muestras del conjunto de entrenamiento se usarán en cada iteración de retropropagación (backpropagation).
El conjunto de datos de validación se utiliza para evaluar el rendimiento del modelo al finalizar cada época. Se emplea un objeto ModelCheckpoint que monitorea la función de pérdida en el conjunto de validación y almacena el modelo correspondiente a la época en la que esta función alcanza su valor más bajo.
Se define val_loss como el valor de la función de coste para los datos de validación cruzada y loss como el valor de la función de coste para los datos de entrenamiento. period=1 indica que el modelo se evaluará y potencialmente se guardará después de cada época.
from tensorflow.keras.callbacks import ModelCheckpoint

# Checkpoint path template; Keras fills in the epoch number and val_loss.
save_weights = os.path.join(
    'keras_models',
    'PRSA_data_vola28_MLP14_weights.{epoch:02d}-{val_loss:.4f}.keras',
)

# Keep only the full model whose validation loss is the lowest seen so far,
# evaluated once per epoch.
save_best14_vola28 = ModelCheckpoint(
    save_weights,
    monitor='val_loss',
    mode='min',
    save_best_only=True,
    save_weights_only=False,
    save_freq='epoch',
    verbose=2,
)
A continuación se itera sobre cada uno de los modelos del objeto models_MLP14.
import os
from joblib import dump, load

history_vola28_MPL14 = []
# Train (or reload) every candidate model; each model's training history is
# cached on disk so re-running the notebook does not retrain from scratch.
for i, model in enumerate(models_MLP14_vola28):
    filename = f'history_vola28_MPL14_model_{i}.joblib'
    if os.path.exists(filename):
        # Cached run: load the stored history dict instead of refitting.
        model_history = load(filename)
        # FIX: the messages previously printed the literal text '(unknown)'
        # instead of interpolating the filename (see the transcript below).
        print(f"El archivo '{filename}' ya existe. Se ha cargado el historial del entrenamiento.")
    else:
        model_history = model.fit(x=X_train_vola28_14, y=y_train_vola28_14, batch_size=16, epochs=50,
                                  verbose=2, callbacks=[save_best14_vola28],
                                  validation_data=(X_val_vola28_14, y_val_vola28_14),
                                  shuffle=True)
        dump(model_history.history, filename)
        print(f"El entrenamiento del modelo {i + 1} se ha completado y el historial ha sido guardado en '{filename}'.")
    # Store the plain history dict regardless of which branch produced it.
    history_vola28_MPL14.append(model_history if isinstance(model_history, dict) else model_history.history)
El archivo 'history_vola28_MPL14_model_0.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola28_MPL14_model_1.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola28_MPL14_model_2.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola28_MPL14_model_3.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola28_MPL14_model_4.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola28_MPL14_model_5.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola28_MPL14_model_6.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola28_MPL14_model_7.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola28_MPL14_model_8.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola28_MPL14_model_9.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola28_MPL14_model_10.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola28_MPL14_model_11.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola28_MPL14_model_12.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola28_MPL14_model_13.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola28_MPL14_model_14.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola28_MPL14_model_15.joblib' ya existe. Se ha cargado el historial del entrenamiento.
En este caso, el parámetro verbose=2 indica que se mostrará una línea por cada época durante el entrenamiento. Los valores posibles son: 0 para no mostrar información, 1 para visualizar una barra de progreso y 2 para ver un registro por época.
Además, las muestras se mezclan de manera aleatoria antes de cada época, gracias a la opción shuffle=True.
Una vez completado el entrenamiento, se realizan predicciones sobre la volatilidad del Precio del Bitcoin utilizando los mejores modelos guardados. Las predicciones generadas, que corresponden a valores escalados, son transformadas inversamente para recuperar los valores originales. Asimismo, se evalúa la calidad del ajuste mediante el uso de métricas de medición como SSE, MAPE, MAD, MSD y R2.
import os
import re
from tensorflow.keras.models import load_model
import numpy as np
# Directory holding the Keras checkpoints written by ModelCheckpoint above.
model_dir = 'keras_models'
files = os.listdir(model_dir)
# FIX: the pattern previously matched 'vola21' checkpoints, so the model
# selected here came from the omega=21 experiment instead of the omega=28
# checkpoints saved above as 'PRSA_data_vola28_MLP14_weights...'.
pattern = r"PRSA_data_vola28_MLP14_weights\.(\d+)-([\d\.]+)\.keras"
best_val_loss = float('inf')
best_model_file = None
best_model14_vola28 = None
# Scan every checkpoint file and keep the one with the lowest val_loss,
# which is encoded in the filename.
for file in files:
    match = re.match(pattern, file)
    if match:
        val_loss = float(match.group(2))  # val_loss portion of the filename
        if val_loss < best_val_loss:
            best_val_loss = val_loss
            best_model_file = file  # remember the best checkpoint so far
# Load the winning checkpoint, if any file matched the pattern.
if best_model_file:
    best_model_path = os.path.join(model_dir, best_model_file)
    print(f"Cargando el mejor modelo: {best_model_file} con val_loss: {best_val_loss}")
    best_model14_vola28 = load_model(best_model_path)
    if best_model14_vola28 is None:
        print("Error: No se pudo cargar el mejor modelo.")
else:
    print("No se encontraron archivos de modelos que coincidan con el patrón.")
Cargando el mejor modelo: PRSA_data_vola21_MLP14_weights.31-0.0007.keras con val_loss: 0.0007
A continuación estimamos las predicciones del modelo MLP para la época en donde la función de pérdida alcanza su valor más bajo.
# Predictions with the best checkpointed model (guard against a failed load).
if best_model14_vola28 is not None:
    train_preds_vola28_MLP14 = best_model14_vola28.predict(X_train_vola28_14)
    val_preds_vola28_MLP14 = best_model14_vola28.predict(X_val_vola28_14)
    test_preds_vola28_MLP14 = best_model14_vola28.predict(X_test_vola28_14)
    # Drop the trailing unit dimension so the prediction arrays are 1-D.
    # FIX: the test predictions were previously squeezed from
    # test_preds_vola21_MLP14 (the omega=21 variable), silently replacing
    # the omega=28 test predictions computed just above.
    train_preds_vola28_MLP14 = np.squeeze(train_preds_vola28_MLP14)
    val_preds_vola28_MLP14 = np.squeeze(val_preds_vola28_MLP14)
    test_preds_vola28_MLP14 = np.squeeze(test_preds_vola28_MLP14)
    # Show the resulting prediction vectors.
    print("Predicciones de Entrenamiento:", train_preds_vola28_MLP14)
    print("Predicciones de validación:", val_preds_vola28_MLP14)
    print("Predicciones de prueba:", test_preds_vola28_MLP14)
else:
    print("No se puede realizar predicciones porque el mejor modelo no se ha cargado.")
155/155 [==============================] - 0s 204us/step
1/1 [==============================] - 0s 9ms/step
1/1 [==============================] - 0s 9ms/step
Predicciones de Entrenamiento: [0.00096389 0.00096389 0.00096389 ... 0.02344747 0.01979331 0.01988902]
Predicciones de validación: [0.01919066 0.01982177 0.01985291 0.02086421 0.01934887 0.01943096
0.01967336 0.01954099 0.01636165 0.01642716 0.01673802 0.01693009
0.01565346 0.01581212]
Predicciones de prueba: [0.01619704 0.01876767 0.02032613 0.02681474 0.02800784 0.02734597
0.02763236 0.02751197 0.03080346 0.03511794 0.03477101 0.03459641
0.03437426 0.03399778]
A continuación se indexan los parámetros sobre la función data_plot() para un horizonte de 14 días (\(\tau = 14\)).
# Split the omega=28 volatility series into train/validation/test segments for plotting (tau = 14).
data_train_plot_vola28_14, data_val_plot_vola28_14, data_test_plot_vola28_14 = data_plot(df_1_st['Volatilidad_28'], 14)
Datos de entrenamiento:
4998 0.0000
4997 0.0000
4996 0.0000
4995 0.0000
4994 0.0000
...
32 0.0170
31 0.0172
30 0.0159
29 0.0160
28 0.0160
Name: Volatilidad_28, Length: 4971, dtype: float64
Datos de validación:
27 0.0177
26 0.0188
25 0.0243
24 0.0252
23 0.0252
22 0.0252
21 0.0248
20 0.0277
19 0.0318
18 0.0320
17 0.0319
16 0.0315
15 0.0316
14 0.0316
Name: Volatilidad_28, dtype: float64
Datos de prueba:
13 0.0320
12 0.0321
11 0.0316
10 0.0323
9 0.0331
8 0.0355
7 0.0362
6 0.0363
5 0.0403
4 0.0434
3 0.0440
2 0.0444
1 0.0444
0 0.0450
Name: Volatilidad_28, dtype: float64
# Plot the last 100 training points together with the validation/test targets and the MLP predictions.
plot_model(data_train_plot_vola28_14[-100:], data_val_plot_vola28_14, data_test_plot_vola28_14, val_preds_vola28_MLP14, test_preds_vola28_MLP14, "Predicciones usando Perceptrón Multicapa (MLP) para un horizonte de 14 días")
A continuación realizamos un análisis sobre los residuales y rendimiento de nuestro modelo con relación a nuestro conjunto de validación(rendimiento) y test (rendimiento y residuales).
Conjunto de datos de Entrenamiento
A continuación se realiza el análisis de los residuales para el conjunto de entrenamiento.
# Residual diagnostics on the training fit: returns Ljung-Box and Jarque-Bera p-values.
ljung_box_pval_MLP_train14_vola28, jarque_bera_pval_MLP_train14_vola28 = diagnostic_plots(y_train_vola28_14, train_preds_vola28_MLP14)
Ljung-Box LB Statistic: 300.047176
Ljung-Box p-value: 0.000000
Se rechaza H0: hay autocorrelación en los residuales.
Jarque-Bera p-value: 0.000000
Se rechaza H0: los residuales no siguen una distribución normal.
# Compute the error metrics for the training fit.
metrica_vola28_MLP_train14 = metricas(y_train_vola28_14,train_preds_vola28_MLP14)
# Give the single metrics row a descriptive label.
metrica_vola28_MLP_train14.index = metrica_vola28_MLP_train14.index.map({0: 'MLP Entrenamiento Volatilidad ω = 28 y τ = 14'})
# Append the residual-diagnostics p-values as extra columns.
metrica_vola28_MLP_train14['Ljung-Box p-value'] = pd.Series([ljung_box_pval_MLP_train14_vola28], index=metrica_vola28_MLP_train14.index)
metrica_vola28_MLP_train14['Jarque-Bera p-value'] = pd.Series([jarque_bera_pval_MLP_train14_vola28], index=metrica_vola28_MLP_train14.index)
# Display the metrics table (notebook cell output).
metrica_vola28_MLP_train14
| SSE | MAPE | MAD | MSD | R2 | Ljung-Box p-value | Jarque-Bera p-value | |
|---|---|---|---|---|---|---|---|
| MLP Entrenamiento Volatilidad ω = 28 y τ = 14 | 0.7766 | 5.47% | 0.0 | 0.0 | 95.75% | 3.2173e-67 | 0.0 |
Conjunto de datos de Prueba:
# Residual diagnostics on the test-set predictions (Ljung-Box and Jarque-Bera p-values).
ljung_box_pvalMLP14_vola28, jarque_bera_pvalMLP14_vola28 = evaluate_residuals(data_test_plot_vola28_14, test_preds_vola28_MLP14)
Se rechaza H0: hay autocorrelación en los residuales.
No se rechaza H0: los residuales siguen una distribución normal.
# Compute the error metrics for the test-set predictions.
metrica_MLP14_test_vola28= metricas(y_test_vola28_14,test_preds_vola28_MLP14)
# Label the metrics row and append the residual-diagnostics p-values.
metrica_MLP14_test_vola28.index = metrica_MLP14_test_vola28.index.map({0: 'MLP Prueba Volatilidad ω = 28 τ = 14'})
metrica_MLP14_test_vola28['Ljung-Box p-value'] = pd.Series([ljung_box_pvalMLP14_vola28], index=metrica_MLP14_test_vola28.index)
metrica_MLP14_test_vola28['Jarque-Bera p-value'] = pd.Series([jarque_bera_pvalMLP14_vola28], index=metrica_MLP14_test_vola28.index)
# Display the metrics table (notebook cell output).
metrica_MLP14_test_vola28
| SSE | MAPE | MAD | MSD | R2 | Ljung-Box p-value | Jarque-Bera p-value | |
|---|---|---|---|---|---|---|---|
| MLP Prueba Volatilidad ω = 28 τ = 14 | 8.3423e-05 | 7.97% | 0.0 | 0.0 | 73.12% | 0.0356 | 0.7552 |
Curva Runs vs Error/Score :
# Plot validation loss per run/epoch for the trained models (tau = 14).
plot_best_model_validation_loss(history_vola28_MPL14)
Boxplot Errores Conjunto de Entrenamiento, Validación y Prueba :
# Boxplots of the residuals on the train/validation/test splits (tau = 14).
errores_plots(y_train_vola28_14, train_preds_vola28_MLP14, y_val_vola28_14, val_preds_vola28_MLP14, y_test_vola28_14, test_preds_vola28_MLP14)
De acuerdo a los resultados obtenidos del gráfico de Run vs Error/Score y al compararlo con el gráfico boxplot de los residuales se observa lo siguiente:
Conjunto de Entrenamiento: La media de los residuales se encuentra alejada del error/score correspondiente al val_loss de la época del mejor modelo.
Conjunto de Validación: La media de los residuales se encuentra alejada del error/score correspondiente al val_loss de la época del mejor modelo.
Conjunto de Prueba: La media de los residuales se encuentra alejada del error/score correspondiente al val_loss de la época del mejor modelo.
Lo anterior es coherente con las métricas obtenidas para el modelo implementado. Sin embargo, a pesar de esto, se evidencia que existe autocorrelación en los residuales, lo que hace que nuestro modelo no sea confiable.
Horizonte de 21 días (\(\tau=21\))#
A continuación, se lleva a cabo la búsqueda de los hiperparámetros que optimizan nuestro modelo utilizando Keras y GridSearch.
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'  # Suppress TensorFlow info/warning logs.

def create_mlp_model(activation='tanh', learning_rate=0.001):
    """Build and compile the MLP used for the 21-day horizon.

    Architecture: 21-dim input -> Dense(32) -> Dense(16) -> Dense(16)
    -> Dropout(0.2) -> single linear output, trained with MAE loss and
    a (legacy) Adam optimizer at the given learning rate.
    """
    inputs = Input(shape=(21,), dtype='float32')
    hidden = inputs
    for width in (32, 16, 16):
        hidden = Dense(width, activation=activation)(hidden)
    hidden = Dropout(0.2)(hidden)  # regularization before the output head
    outputs = Dense(1, activation='linear')(hidden)
    mlp = Model(inputs=inputs, outputs=outputs)
    mlp.compile(loss='mean_absolute_error',
                optimizer=tf.keras.optimizers.legacy.Adam(learning_rate=learning_rate))
    return mlp
# Grid-search configuration over the MLP hyper-parameters (tau = 21).
model = KerasRegressor(build_fn=create_mlp_model, epochs=20, batch_size=16, verbose=0)

# Candidate values; wider sweeps tried previously are noted inline.
param_grid = {
    'activation': ['tanh'],    # e.g. ['relu', 'tanh', 'sigmoid']
    'epochs': [100],           # e.g. [20, 50, 100, 200, 300]
    'learning_rate': [0.001],  # e.g. [0.001, 0.01, 0.1, 0.2, 0.3]
}

grid = GridSearchCV(estimator=model, param_grid=param_grid, n_jobs=-1,
                    cv=KFold(n_splits=5, shuffle=True), verbose=2)

# Fit the grid search on the 21-step-ahead training data.
grid_result = grid.fit(X_train_vola28_21, y_train_vola28_21)

# Report the winning configuration.
print(f"Mejor función de activación: {grid_result.best_params_['activation']}")
print(f"Mejor número de epocas: {grid_result.best_params_['epochs']}")
print(f"Mejor Longitud de paso μ (Learning Rate): {grid_result.best_params_['learning_rate']}")
print(f"Mejor Puntuación: {grid_result.best_score_}")
Fitting 5 folds for each of 1 candidates, totalling 5 fits
[CV] END ...activation=tanh, epochs=100, learning_rate=0.001; total time= 18.9s
[CV] END ...activation=tanh, epochs=100, learning_rate=0.001; total time= 19.2s
[CV] END ...activation=tanh, epochs=100, learning_rate=0.001; total time= 19.4s
[CV] END ...activation=tanh, epochs=100, learning_rate=0.001; total time= 19.5s
[CV] END ...activation=tanh, epochs=100, learning_rate=0.001; total time= 19.4s
Mejor función de activación: tanh
Mejor número de epocas: 100
Mejor Longitud de paso μ (Learning Rate): 0.001
Mejor Puntuación: -0.0034608248621225356
A continuación, configuramos la red MLP empleando la API funcional de Keras. Con este método, una capa puede ser designada como la entrada para la siguiente capa en el momento de su definición.
En este contexto, Input es una función utilizada para establecer una capa de entrada en un modelo de red neuronal. La propiedad shape=(21,) define la estructura de los datos de entrada, lo que indica que estos tendrán 21 dimensiones. Por otro lado, dtype='float32' especifica que los elementos de esta capa serán números de punto flotante de 32 bits.
A continuación se define la función build_models_mlp para la generación de modelos de acuerdo a los parámetros indexados a través de la lista.
Indexación de parámetros de la función build_models_mlp con base en lo indicado en el numeral 3 del parcial práctico.
# Hyperparameter sweep for build_models_mlp: one model per (neurons, dropout)
# combination, all with 21 inputs and tanh activation (the grid-search winner).
input_shape21 = 21
neurons_list = [10, 100, 1000, 10000]
dropout_rates = [0.2, 0.4, 0.6, 0.8]
models_MLP21_vola28 = build_models_mlp(input_shape21, neurons_list, dropout_rates, 'tanh')
Modelo con 10 neuronas y dropout 0.2:
Model: "model_715"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_716 (InputLayer) [(None, 21)] 0
dense_1840 (Dense) (None, 32) 704
dense_1841 (Dense) (None, 16) 528
dense_1842 (Dense) (None, 16) 272
dropout_715 (Dropout) (None, 16) 0
dense_1843 (Dense) (None, 1) 17
=================================================================
Total params: 1,521
Trainable params: 1,521
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10 neuronas y dropout 0.4:
Model: "model_716"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_717 (InputLayer) [(None, 21)] 0
dense_1844 (Dense) (None, 32) 704
dense_1845 (Dense) (None, 16) 528
dense_1846 (Dense) (None, 16) 272
dropout_716 (Dropout) (None, 16) 0
dense_1847 (Dense) (None, 1) 17
=================================================================
Total params: 1,521
Trainable params: 1,521
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10 neuronas y dropout 0.6:
Model: "model_717"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_718 (InputLayer) [(None, 21)] 0
dense_1848 (Dense) (None, 32) 704
dense_1849 (Dense) (None, 16) 528
dense_1850 (Dense) (None, 16) 272
dropout_717 (Dropout) (None, 16) 0
dense_1851 (Dense) (None, 1) 17
=================================================================
Total params: 1,521
Trainable params: 1,521
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10 neuronas y dropout 0.8:
Model: "model_718"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_719 (InputLayer) [(None, 21)] 0
dense_1852 (Dense) (None, 32) 704
dense_1853 (Dense) (None, 16) 528
dense_1854 (Dense) (None, 16) 272
dropout_718 (Dropout) (None, 16) 0
dense_1855 (Dense) (None, 1) 17
=================================================================
Total params: 1,521
Trainable params: 1,521
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 100 neuronas y dropout 0.2:
Model: "model_719"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_720 (InputLayer) [(None, 21)] 0
dense_1856 (Dense) (None, 32) 704
dense_1857 (Dense) (None, 16) 528
dense_1858 (Dense) (None, 16) 272
dropout_719 (Dropout) (None, 16) 0
dense_1859 (Dense) (None, 1) 17
=================================================================
Total params: 1,521
Trainable params: 1,521
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 100 neuronas y dropout 0.4:
Model: "model_720"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_721 (InputLayer) [(None, 21)] 0
dense_1860 (Dense) (None, 32) 704
dense_1861 (Dense) (None, 16) 528
dense_1862 (Dense) (None, 16) 272
dropout_720 (Dropout) (None, 16) 0
dense_1863 (Dense) (None, 1) 17
=================================================================
Total params: 1,521
Trainable params: 1,521
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 100 neuronas y dropout 0.6:
Model: "model_721"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_722 (InputLayer) [(None, 21)] 0
dense_1864 (Dense) (None, 32) 704
dense_1865 (Dense) (None, 16) 528
dense_1866 (Dense) (None, 16) 272
dropout_721 (Dropout) (None, 16) 0
dense_1867 (Dense) (None, 1) 17
=================================================================
Total params: 1,521
Trainable params: 1,521
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 100 neuronas y dropout 0.8:
Model: "model_722"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_723 (InputLayer) [(None, 21)] 0
dense_1868 (Dense) (None, 32) 704
dense_1869 (Dense) (None, 16) 528
dense_1870 (Dense) (None, 16) 272
dropout_722 (Dropout) (None, 16) 0
dense_1871 (Dense) (None, 1) 17
=================================================================
Total params: 1,521
Trainable params: 1,521
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 1000 neuronas y dropout 0.2:
Model: "model_723"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_724 (InputLayer) [(None, 21)] 0
dense_1872 (Dense) (None, 32) 704
dense_1873 (Dense) (None, 16) 528
dense_1874 (Dense) (None, 16) 272
dropout_723 (Dropout) (None, 16) 0
dense_1875 (Dense) (None, 1) 17
=================================================================
Total params: 1,521
Trainable params: 1,521
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 1000 neuronas y dropout 0.4:
Model: "model_724"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_725 (InputLayer) [(None, 21)] 0
dense_1876 (Dense) (None, 32) 704
dense_1877 (Dense) (None, 16) 528
dense_1878 (Dense) (None, 16) 272
dropout_724 (Dropout) (None, 16) 0
dense_1879 (Dense) (None, 1) 17
=================================================================
Total params: 1,521
Trainable params: 1,521
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 1000 neuronas y dropout 0.6:
Model: "model_725"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_726 (InputLayer) [(None, 21)] 0
dense_1880 (Dense) (None, 32) 704
dense_1881 (Dense) (None, 16) 528
dense_1882 (Dense) (None, 16) 272
dropout_725 (Dropout) (None, 16) 0
dense_1883 (Dense) (None, 1) 17
=================================================================
Total params: 1,521
Trainable params: 1,521
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 1000 neuronas y dropout 0.8:
Model: "model_726"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_727 (InputLayer) [(None, 21)] 0
dense_1884 (Dense) (None, 32) 704
dense_1885 (Dense) (None, 16) 528
dense_1886 (Dense) (None, 16) 272
dropout_726 (Dropout) (None, 16) 0
dense_1887 (Dense) (None, 1) 17
=================================================================
Total params: 1,521
Trainable params: 1,521
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10000 neuronas y dropout 0.2:
Model: "model_727"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_728 (InputLayer) [(None, 21)] 0
dense_1888 (Dense) (None, 32) 704
dense_1889 (Dense) (None, 16) 528
dense_1890 (Dense) (None, 16) 272
dropout_727 (Dropout) (None, 16) 0
dense_1891 (Dense) (None, 1) 17
=================================================================
Total params: 1,521
Trainable params: 1,521
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10000 neuronas y dropout 0.4:
Model: "model_728"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_729 (InputLayer) [(None, 21)] 0
dense_1892 (Dense) (None, 32) 704
dense_1893 (Dense) (None, 16) 528
dense_1894 (Dense) (None, 16) 272
dropout_728 (Dropout) (None, 16) 0
dense_1895 (Dense) (None, 1) 17
=================================================================
Total params: 1,521
Trainable params: 1,521
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10000 neuronas y dropout 0.6:
Model: "model_729"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_730 (InputLayer) [(None, 21)] 0
dense_1896 (Dense) (None, 32) 704
dense_1897 (Dense) (None, 16) 528
dense_1898 (Dense) (None, 16) 272
dropout_729 (Dropout) (None, 16) 0
dense_1899 (Dense) (None, 1) 17
=================================================================
Total params: 1,521
Trainable params: 1,521
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10000 neuronas y dropout 0.8:
Model: "model_730"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_731 (InputLayer) [(None, 21)] 0
dense_1900 (Dense) (None, 32) 704
dense_1901 (Dense) (None, 16) 528
dense_1902 (Dense) (None, 16) 272
dropout_730 (Dropout) (None, 16) 0
dense_1903 (Dense) (None, 1) 17
=================================================================
Total params: 1,521
Trainable params: 1,521
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Para entrenar el modelo, se utiliza la función fit() en el objeto del modelo, proporcionándole como argumentos X_train y y_train. El proceso de entrenamiento se lleva a cabo a lo largo de un número específico de épocas. Además, el parámetro batch_size especifica cuántas muestras del conjunto de entrenamiento se usarán en cada iteración de retropropagación (backpropagation).
El conjunto de datos de validación se utiliza para evaluar el rendimiento del modelo al finalizar cada época. Se emplea un objeto ModelCheckpoint que monitorea la función de pérdida en el conjunto de validación y almacena el modelo correspondiente a la época en la que esta función alcanza su valor más bajo.
Se define val_loss como el valor de la función de coste para los datos de validación cruzada y loss como el valor de la función de coste para los datos de entrenamiento. period=1 indica que el modelo se evaluará y potencialmente se guardará después de cada época.
from tensorflow.keras.callbacks import ModelCheckpoint

# Checkpoint file name encodes the epoch number and the validation loss.
save_weights = os.path.join('keras_models', 'PRSA_data_vola28_MLP21_weights.{epoch:02d}-{val_loss:.4f}.keras')

# Keep only the full model (not just weights) with the lowest validation
# loss seen so far, evaluated once per epoch.
save_best21_vola28 = ModelCheckpoint(save_weights, monitor='val_loss', verbose=2,
                                     save_best_only=True, save_weights_only=False,
                                     mode='min', save_freq='epoch')
A continuación se itera sobre cada uno de los modelos del objeto models_MLP21.
import os
from joblib import dump, load

# Train each candidate MLP once and cache its training history on disk, so
# re-running the notebook skips models that already finished.
history_vola28_MPL21 = []

# Iterate over every model built for the 21-day horizon.
for i, model in enumerate(models_MLP21_vola28):
    filename = f'history_vola28_MPL21_model_{i}.joblib'
    if os.path.exists(filename):
        # Cached run: reload the stored history dict instead of retraining.
        model_history = load(filename)
        # Fixed: the message previously printed a literal placeholder
        # instead of the actual file name.
        print(f"El archivo '{filename}' ya existe. Se ha cargado el historial del entrenamiento.")
    else:
        model_history = model.fit(x=X_train_vola28_21, y=y_train_vola28_21, batch_size=16, epochs=100,
                                  verbose=2, callbacks=[save_best21_vola28],
                                  validation_data=(X_val_vola28_21, y_val_vola28_21),
                                  shuffle=True)
        dump(model_history.history, filename)
        print(f"El entrenamiento del modelo {i + 1} se ha completado y el historial ha sido guardado en '{filename}'.")
    # Loaded histories are plain dicts; fresh fits return a History object
    # whose .history attribute holds the per-epoch metrics.
    history_vola28_MPL21.append(model_history if isinstance(model_history, dict) else model_history.history)
El archivo 'history_vola28_MPL21_model_0.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola28_MPL21_model_1.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola28_MPL21_model_2.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola28_MPL21_model_3.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola28_MPL21_model_4.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola28_MPL21_model_5.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola28_MPL21_model_6.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola28_MPL21_model_7.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola28_MPL21_model_8.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola28_MPL21_model_9.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola28_MPL21_model_10.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola28_MPL21_model_11.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola28_MPL21_model_12.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola28_MPL21_model_13.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola28_MPL21_model_14.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola28_MPL21_model_15.joblib' ya existe. Se ha cargado el historial del entrenamiento.
En este caso, el parámetro verbose=2 indica que se mostrará una línea por cada época durante el entrenamiento. Los valores posibles son: 0 para no mostrar información, 1 para visualizar una barra de progreso y 2 para ver un registro por época.
Además, las muestras se mezclan de manera aleatoria antes de cada época, gracias a la opción shuffle=True.
Una vez completado el entrenamiento, se realizan predicciones sobre el precio del Bitcoin utilizando los mejores modelos guardados. Asimismo, se evalúa la calidad del ajuste mediante el uso de métricas de medición como SSE, MAPE, MAD, MSD y R2.
import os
import re
from tensorflow.keras.models import load_model
import numpy as np

# Scan the checkpoint directory and keep the file whose file-name-encoded
# validation loss is the smallest.
model_dir = 'keras_models'
# NOTE(review): this pattern matches files named "...vola21_MLP21...", while
# the ModelCheckpoint above saves "...vola28_MLP21..." — confirm which prefix
# the checkpoints on disk actually use.
pattern = r"PRSA_data_vola21_MLP21_weights\.(\d+)-([\d\.]+)\.keras"

best_val_loss = float('inf')
best_model_file = None
best_model21_vola28 = None

# Walk the directory, parsing epoch and val_loss out of each matching name.
for file in os.listdir(model_dir):
    match = re.match(pattern, file)
    if not match:
        continue
    epoch = int(match.group(1))        # epoch number embedded in the name
    val_loss = float(match.group(2))   # validation loss embedded in the name
    if val_loss < best_val_loss:
        best_val_loss = val_loss
        best_model_file = file         # remember the best checkpoint so far

# Load the winning checkpoint, if any was found.
if best_model_file:
    best_model_path = os.path.join(model_dir, best_model_file)
    print(f"Cargando el mejor modelo: {best_model_file} con val_loss: {best_val_loss}")
    best_model21_vola28 = load_model(best_model_path)
    if best_model21_vola28 is None:
        print("Error: No se pudo cargar el mejor modelo.")
else:
    print("No se encontraron archivos de modelos que coincidan con el patrón.")
Cargando el mejor modelo: PRSA_data_vola21_MLP21_weights.33-0.0009.keras con val_loss: 0.0009
A continuación estimamos las predicciones del modelo MLP para la época en donde la función de pérdida alcanza su valor más bajo.
# Predictions with the best checkpointed model, for each data split.
if best_model21_vola28 is not None:
    train_preds_vola28_MLP21 = best_model21_vola28.predict(X_train_vola28_21)
    val_preds_vola28_MLP21 = best_model21_vola28.predict(X_val_vola28_21)
    test_preds_vola28_MLP21 = best_model21_vola28.predict(X_test_vola28_21)

    # Collapse the (n, 1) prediction arrays to 1-D vectors.
    train_preds_vola28_MLP21 = np.squeeze(train_preds_vola28_MLP21)
    val_preds_vola28_MLP21 = np.squeeze(val_preds_vola28_MLP21)
    # Fixed: previously squeezed the undefined name test_preds_vola21_MLP21,
    # which would raise NameError (or silently reuse stale data).
    test_preds_vola28_MLP21 = np.squeeze(test_preds_vola28_MLP21)

    # Show the predictions for each split.
    print('Predicciones de entrenamiento', train_preds_vola28_MLP21)
    print("Predicciones de validación:", val_preds_vola28_MLP21)
    print("Predicciones de prueba:", test_preds_vola28_MLP21)
else:
    print("No se puede realizar predicciones porque el mejor modelo no se ha cargado.")
154/154 [==============================] - 0s 236us/step
1/1 [==============================] - 0s 10ms/step
1/1 [==============================] - 0s 10ms/step
Predicciones de entrenamiento [0.00090695 0.00090695 0.00090695 ... 0.02768848 0.0277948 0.02782575]
Predicciones de validación: [0.02773869 0.0289297 0.02870962 0.02838853 0.0281683 0.02947461
0.02961299 0.02954107 0.02873065 0.0285065 0.02731386 0.02653163
0.02650014 0.0265651 0.02674493 0.02304945 0.02275787 0.02339505
0.02389429 0.02034396 0.02012679]
Predicciones de prueba: [0.02008242 0.01652398 0.01668522 0.01795449 0.01781894 0.01621883
0.01652392 0.01663592 0.01646791 0.01594731 0.01566346 0.01654514
0.01716333 0.01691436 0.01634654 0.01892112 0.02054337 0.02705515
0.02855585 0.0279751 0.0280706 ]
A continuación se indexan los parámetros sobre la función data_plot() para un horizonte de 21 días (\(\tau = 21\)).
data_train_plot_vola28_21, data_val_plot_vola28_21, data_test_plot_vola28_21 = data_plot(df_1_st['Volatilidad_28'], 21)
Datos de entrenamiento:
4998 0.0000
4997 0.0000
4996 0.0000
4995 0.0000
4994 0.0000
...
46 0.0238
45 0.0242
44 0.0205
43 0.0205
42 0.0197
Name: Volatilidad_28, Length: 4957, dtype: float6421
Datos de validación:
41 0.0203
40 0.0203
39 0.0213
38 0.0197
37 0.0197
36 0.0199
35 0.0198
34 0.0167
33 0.0167
32 0.0170
31 0.0172
30 0.0159
29 0.0160
28 0.0160
27 0.0177
26 0.0188
25 0.0243
24 0.0252
23 0.0252
22 0.0252
21 0.0248
Name: Volatilidad_28, dtype: float6421
Datos de prueba:
20 0.0277
19 0.0318
18 0.0320
17 0.0319
16 0.0315
15 0.0316
14 0.0316
13 0.0320
12 0.0321
11 0.0316
10 0.0323
9 0.0331
8 0.0355
7 0.0362
6 0.0363
5 0.0403
4 0.0434
3 0.0440
2 0.0444
1 0.0444
0 0.0450
Name: Volatilidad_28, dtype: float6421
plot_model(data_train_plot_vola28_21[-100:], data_val_plot_vola28_21, data_test_plot_vola28_21, val_preds_vola28_MLP21, test_preds_vola28_MLP21, "Predicciones usando Perceptrón Multicapa (MLP) para un horizonte de 21 días")
A continuación realizamos un análisis sobre los residuales y rendimiento de nuestro modelo con relación a nuestro conjunto de validación(rendimiento) y test (rendimiento y residuales).
Conjunto de datos de Entrenamiento
A continuación se realiza el análisis de los residuales para el conjunto de entrenamiento.
ljung_box_pval_MLP_train21_vola28, jarque_bera_pval_MLP_train21_vola28 = diagnostic_plots(y_train_vola28_21, train_preds_vola28_MLP21)
Ljung-Box LB Statistic: 351.172296
Ljung-Box p-value: 0.000000
Se rechaza H0: hay autocorrelación en los residuales.
Jarque-Bera p-value: 0.000000
Se rechaza H0: los residuales no siguen una distribución normal.
# Build the training-set metrics row and append the residual-diagnostic p-values.
metrica_vola28_MLP_train21 = metricas(y_train_vola28_21, train_preds_vola28_MLP21)
metrica_vola28_MLP_train21.index = metrica_vola28_MLP_train21.index.map(
    {0: 'MLP Entrenamiento Volatilidad ω = 28 y τ = 21'})
for col, pval in (('Ljung-Box p-value', ljung_box_pval_MLP_train21_vola28),
                  ('Jarque-Bera p-value', jarque_bera_pval_MLP_train21_vola28)):
    metrica_vola28_MLP_train21[col] = pd.Series([pval], index=metrica_vola28_MLP_train21.index)
metrica_vola28_MLP_train21
| SSE | MAPE | MAD | MSD | R2 | Ljung-Box p-value | Jarque-Bera p-value | |
|---|---|---|---|---|---|---|---|
| MLP Entrenamiento Volatilidad ω = 28 y τ = 21 | 0.7573 | 4.54% | 0.0 | 0.0 | 95.85% | 2.3543e-78 | 0.0 |
Conjunto de datos de Prueba:
ljung_box_pvalMLP21_vola28, jarque_bera_pvalMLP21_vola28 = evaluate_residuals(data_test_plot_vola28_21, test_preds_vola28_MLP21)
Se rechaza H0: hay autocorrelación en los residuales.
No se rechaza H0: los residuales siguen una distribución normal.
# Build the test-set metrics row and append the residual-diagnostic p-values.
metrica_MLP21_test_vola28 = metricas(y_test_vola28_21, test_preds_vola28_MLP21)
metrica_MLP21_test_vola28.index = metrica_MLP21_test_vola28.index.map(
    {0: 'MLP Prueba Volatilidad ω = 28 y τ = 21'})
for col, pval in (('Ljung-Box p-value', ljung_box_pvalMLP21_vola28),
                  ('Jarque-Bera p-value', jarque_bera_pvalMLP21_vola28)):
    metrica_MLP21_test_vola28[col] = pd.Series([pval], index=metrica_MLP21_test_vola28.index)
metrica_MLP21_test_vola28
| SSE | MAPE | MAD | MSD | R2 | Ljung-Box p-value | Jarque-Bera p-value | |
|---|---|---|---|---|---|---|---|
| MLP Prueba Volatilidad ω = 28 y τ = 21 | 0.0001 | 9.38% | 0.0 | 0.0 | 43.97% | 0.0121 | 0.1292 |
Curva Runs vs Error/Score :
plot_best_model_validation_loss(history_vola28_MPL21)
Boxplot Errores Conjunto de Entrenamiento, Validación y Prueba :
errores_plots(y_train_vola28_21, train_preds_vola28_MLP21, y_val_vola28_21, val_preds_vola28_MLP21, y_test_vola28_21, test_preds_vola28_MLP21)
De acuerdo a los resultados obtenidos del gráfico de Run vs Error/Score y al compararlo con el gráfico boxplot de los residuales se observa lo siguiente:
Conjunto de Entrenamiento: La medía de los residuales se encuentra alejada del error/score correspondiente al val_loss de la epoca del mejor modelo.
Conjunto de Validación: La medía de los residuales se encuentra alejada considerablemente del error/score correspondiente al val_loss de la epoca del mejor modelo.
Conjunto de Prueba: La medía de los residuales se encuentra cercana del error/score correspondiente al val_loss de la epoca del mejor modelo. De igual forma se observa una alta variabilidad en los residuales.
Lo anterior es coherente con las métricas obtenidas para el modelo implementado.
Horizonte de 28 días (\(\tau=28\))#
A continuación, se lleva a cabo la búsqueda de los hiperparámetros que optimizan nuestro modelo utilizando Keras y GridSearch.
# Suppress TensorFlow info and warning logs.
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

def create_mlp_model(activation='tanh', learning_rate=0.001):
    """Build and compile the MLP for the 28-day horizon (28-dim input).

    Architecture: Dense(32) -> Dense(16) -> Dense(16) -> Dropout(0.2)
    -> Dense(1, linear); MAE loss with the Adam optimizer.
    """
    inputs = Input(shape=(28,), dtype='float32')           # input layer
    hidden = Dense(32, activation=activation)(inputs)
    hidden = Dense(16, activation=activation)(hidden)
    hidden = Dense(16, activation=activation)(hidden)
    regularized = Dropout(0.2)(hidden)                     # dropout for regularization
    outputs = Dense(1, activation='linear')(regularized)   # output layer
    mlp = Model(inputs=inputs, outputs=outputs)
    mlp.compile(loss='mean_absolute_error',
                optimizer=tf.keras.optimizers.legacy.Adam(learning_rate=learning_rate))
    return mlp
# Grid Search configuration for the 28-day horizon.
model = KerasRegressor(build_fn=create_mlp_model, epochs=20, batch_size=16, verbose=0)

# Single-candidate grid; the wider search spaces are kept in the comments.
param_grid = {
    'activation': ['relu'],    # candidates: ['relu', 'tanh', 'sigmoid']
    'epochs': [50],            # candidates: [20, 50, 100, 200, 300]
    'learning_rate': [0.001],  # candidates: [0.001, 0.01, 0.1, 0.2, 0.3]
}

grid = GridSearchCV(estimator=model, param_grid=param_grid, n_jobs=-1,
                    cv=KFold(n_splits=5, shuffle=True), verbose=2)

# Fit the grid search on the 28-day training split.
grid_result = grid.fit(X_train_vola28_28, y_train_vola28_28)

# Report the winning configuration.
print(f"Mejor función de activación: {grid_result.best_params_['activation']}")
print(f"Mejor número de epocas: {grid_result.best_params_['epochs']}")
print(f"Mejor Longitud de paso μ (Learning Rate): {grid_result.best_params_['learning_rate']}")
print(f"Mejor Puntuación: {grid_result.best_score_}")
Fitting 5 folds for each of 1 candidates, totalling 5 fits
[CV] END ....activation=relu, epochs=50, learning_rate=0.001; total time= 8.2s
[CV] END ....activation=relu, epochs=50, learning_rate=0.001; total time= 8.4s
[CV] END ....activation=relu, epochs=50, learning_rate=0.001; total time= 8.4s
[CV] END ....activation=relu, epochs=50, learning_rate=0.001; total time= 8.6s
[CV] END ....activation=relu, epochs=50, learning_rate=0.001; total time= 8.8s
Mejor función de activación: relu
Mejor número de epocas: 50
Mejor Longitud de paso μ (Learning Rate): 0.001
Mejor Puntuación: -0.0037463522050529717
A continuación, configuramos la red MLP empleando la API funcional de Keras. Con este método, una capa puede ser designada como la entrada para la siguiente capa en el momento de su definición.
En este contexto, Input es una función utilizada para establecer una capa de entrada en un modelo de red neuronal. La propiedad shape=(28,) define la estructura de los datos de entrada, lo que indica que estos tendrán 28 dimensiones. Por otro lado, dtype='float32' especifica que los elementos de esta capa serán números de punto flotante de 32 bits.
A continuación se define la función build_models_mlp para la generación de modelos de acuerdo a los parámetros indexados a través de la lista.
Indexación de parámetros de la función build_models_mlp con base en lo indicado en el numeral 3 del parcial práctico.
# Hyperparameter sweep for build_models_mlp: one model per (neurons, dropout)
# combination, all with 28 inputs and relu activation (the grid-search winner).
input_shape28 = 28
neurons_list = [10, 100, 1000, 10000]
dropout_rates = [0.2, 0.4, 0.6, 0.8]
models_MLP28_vola28 = build_models_mlp(input_shape28, neurons_list, dropout_rates, 'relu')
Modelo con 10 neuronas y dropout 0.2:
Model: "model_732"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_733 (InputLayer) [(None, 28)] 0
dense_1908 (Dense) (None, 32) 928
dense_1909 (Dense) (None, 16) 528
dense_1910 (Dense) (None, 16) 272
dropout_732 (Dropout) (None, 16) 0
dense_1911 (Dense) (None, 1) 17
=================================================================
Total params: 1,745
Trainable params: 1,745
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10 neuronas y dropout 0.4:
Model: "model_733"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_734 (InputLayer) [(None, 28)] 0
dense_1912 (Dense) (None, 32) 928
dense_1913 (Dense) (None, 16) 528
dense_1914 (Dense) (None, 16) 272
dropout_733 (Dropout) (None, 16) 0
dense_1915 (Dense) (None, 1) 17
=================================================================
Total params: 1,745
Trainable params: 1,745
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10 neuronas y dropout 0.6:
Model: "model_734"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_735 (InputLayer) [(None, 28)] 0
dense_1916 (Dense) (None, 32) 928
dense_1917 (Dense) (None, 16) 528
dense_1918 (Dense) (None, 16) 272
dropout_734 (Dropout) (None, 16) 0
dense_1919 (Dense) (None, 1) 17
=================================================================
Total params: 1,745
Trainable params: 1,745
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10 neuronas y dropout 0.8:
Model: "model_735"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_736 (InputLayer) [(None, 28)] 0
dense_1920 (Dense) (None, 32) 928
dense_1921 (Dense) (None, 16) 528
dense_1922 (Dense) (None, 16) 272
dropout_735 (Dropout) (None, 16) 0
dense_1923 (Dense) (None, 1) 17
=================================================================
Total params: 1,745
Trainable params: 1,745
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 100 neuronas y dropout 0.2:
Model: "model_736"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_737 (InputLayer) [(None, 28)] 0
dense_1924 (Dense) (None, 32) 928
dense_1925 (Dense) (None, 16) 528
dense_1926 (Dense) (None, 16) 272
dropout_736 (Dropout) (None, 16) 0
dense_1927 (Dense) (None, 1) 17
=================================================================
Total params: 1,745
Trainable params: 1,745
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 100 neuronas y dropout 0.4:
Model: "model_737"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_738 (InputLayer) [(None, 28)] 0
dense_1928 (Dense) (None, 32) 928
dense_1929 (Dense) (None, 16) 528
dense_1930 (Dense) (None, 16) 272
dropout_737 (Dropout) (None, 16) 0
dense_1931 (Dense) (None, 1) 17
=================================================================
Total params: 1,745
Trainable params: 1,745
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 100 neuronas y dropout 0.6:
Model: "model_738"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_739 (InputLayer) [(None, 28)] 0
dense_1932 (Dense) (None, 32) 928
dense_1933 (Dense) (None, 16) 528
dense_1934 (Dense) (None, 16) 272
dropout_738 (Dropout) (None, 16) 0
dense_1935 (Dense) (None, 1) 17
=================================================================
Total params: 1,745
Trainable params: 1,745
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 100 neuronas y dropout 0.8:
Model: "model_739"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_740 (InputLayer) [(None, 28)] 0
dense_1936 (Dense) (None, 32) 928
dense_1937 (Dense) (None, 16) 528
dense_1938 (Dense) (None, 16) 272
dropout_739 (Dropout) (None, 16) 0
dense_1939 (Dense) (None, 1) 17
=================================================================
Total params: 1,745
Trainable params: 1,745
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 1000 neuronas y dropout 0.2:
Model: "model_740"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_741 (InputLayer) [(None, 28)] 0
dense_1940 (Dense) (None, 32) 928
dense_1941 (Dense) (None, 16) 528
dense_1942 (Dense) (None, 16) 272
dropout_740 (Dropout) (None, 16) 0
dense_1943 (Dense) (None, 1) 17
=================================================================
Total params: 1,745
Trainable params: 1,745
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 1000 neuronas y dropout 0.4:
Model: "model_741"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_742 (InputLayer) [(None, 28)] 0
dense_1944 (Dense) (None, 32) 928
dense_1945 (Dense) (None, 16) 528
dense_1946 (Dense) (None, 16) 272
dropout_741 (Dropout) (None, 16) 0
dense_1947 (Dense) (None, 1) 17
=================================================================
Total params: 1,745
Trainable params: 1,745
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 1000 neuronas y dropout 0.6:
Model: "model_742"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_743 (InputLayer) [(None, 28)] 0
dense_1948 (Dense) (None, 32) 928
dense_1949 (Dense) (None, 16) 528
dense_1950 (Dense) (None, 16) 272
dropout_742 (Dropout) (None, 16) 0
dense_1951 (Dense) (None, 1) 17
=================================================================
Total params: 1,745
Trainable params: 1,745
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 1000 neuronas y dropout 0.8:
Model: "model_743"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_744 (InputLayer) [(None, 28)] 0
dense_1952 (Dense) (None, 32) 928
dense_1953 (Dense) (None, 16) 528
dense_1954 (Dense) (None, 16) 272
dropout_743 (Dropout) (None, 16) 0
dense_1955 (Dense) (None, 1) 17
=================================================================
Total params: 1,745
Trainable params: 1,745
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10000 neuronas y dropout 0.2:
Model: "model_744"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_745 (InputLayer) [(None, 28)] 0
dense_1956 (Dense) (None, 32) 928
dense_1957 (Dense) (None, 16) 528
dense_1958 (Dense) (None, 16) 272
dropout_744 (Dropout) (None, 16) 0
dense_1959 (Dense) (None, 1) 17
=================================================================
Total params: 1,745
Trainable params: 1,745
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10000 neuronas y dropout 0.4:
Model: "model_745"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_746 (InputLayer) [(None, 28)] 0
dense_1960 (Dense) (None, 32) 928
dense_1961 (Dense) (None, 16) 528
dense_1962 (Dense) (None, 16) 272
dropout_745 (Dropout) (None, 16) 0
dense_1963 (Dense) (None, 1) 17
=================================================================
Total params: 1,745
Trainable params: 1,745
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10000 neuronas y dropout 0.6:
Model: "model_746"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_747 (InputLayer) [(None, 28)] 0
dense_1964 (Dense) (None, 32) 928
dense_1965 (Dense) (None, 16) 528
dense_1966 (Dense) (None, 16) 272
dropout_746 (Dropout) (None, 16) 0
dense_1967 (Dense) (None, 1) 17
=================================================================
Total params: 1,745
Trainable params: 1,745
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Modelo con 10000 neuronas y dropout 0.8:
Model: "model_747"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_748 (InputLayer) [(None, 28)] 0
dense_1968 (Dense) (None, 32) 928
dense_1969 (Dense) (None, 16) 528
dense_1970 (Dense) (None, 16) 272
dropout_747 (Dropout) (None, 16) 0
dense_1971 (Dense) (None, 1) 17
=================================================================
Total params: 1,745
Trainable params: 1,745
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Para entrenar el modelo, se utiliza la función fit() en el objeto del modelo, proporcionándole como argumentos X_train y y_train. El proceso de entrenamiento se lleva a cabo a lo largo de un número específico de épocas. Además, el parámetro batch_size especifica cuántas muestras del conjunto de entrenamiento se usarán en cada iteración de retropropagación (backpropagation).
El conjunto de datos de validación se utiliza para evaluar el rendimiento del modelo al finalizar cada época. Se emplea un objeto ModelCheckpoint que monitorea la función de pérdida en el conjunto de validación y almacena el modelo correspondiente a la época en la que esta función alcanza su valor más bajo.
Se define val_loss como el valor de la función de coste para los datos de validación cruzada y loss como el valor de la función de coste para los datos de entrenamiento. period=1 indica que el modelo se evaluará y potencialmente se guardará después de cada época.
from tensorflow.keras.callbacks import ModelCheckpoint

# Checkpoint callback: persist the full model only for the epoch whose
# validation loss is the lowest seen so far. The filename template embeds
# the epoch number and the val_loss value for later parsing.
save_weights = os.path.join(
    'keras_models',
    'PRSA_data_vola28_MLP28_weights.{epoch:02d}-{val_loss:.4f}.keras',
)
save_best28_vola28 = ModelCheckpoint(
    save_weights,
    monitor='val_loss',       # metric evaluated at the end of each epoch
    verbose=2,
    save_best_only=True,      # overwrite only when val_loss improves
    save_weights_only=False,  # save the whole model, not just the weights
    mode='min',
    save_freq='epoch',
)
A continuación se itera sobre cada uno de los modelos del objeto models_MLP28.
import os
from joblib import dump, load

# Train every candidate MLP once and cache its Keras History dict on disk,
# so re-running the notebook loads the cached history instead of retraining.
history_vola28_MPL28 = []
for i, model in enumerate(models_MLP28_vola28):
    filename = f'history_vola28_MPL28_model_{i}.joblib'
    if os.path.exists(filename):
        # Cached run: load the stored history dict instead of fitting again.
        model_history = load(filename)
        # FIX: the message previously printed the literal "(unknown)" instead
        # of interpolating the actual file name.
        print(f"El archivo '{filename}' ya existe. Se ha cargado el historial del entrenamiento.")
    else:
        # Epochs come from the earlier grid search; the checkpoint callback
        # keeps the weights of the best validation-loss epoch.
        model_history = model.fit(x=X_train_vola28_28, y=y_train_vola28_28, batch_size=16, epochs=50,
                                  verbose=2, callbacks=[save_best28_vola28],
                                  validation_data=(X_val_vola28_28, y_val_vola28_28),
                                  shuffle=True)
        dump(model_history.history, filename)
        print(f"El entrenamiento del modelo {i + 1} se ha completado y el historial ha sido guardado en '{filename}'.")
    # Normalise: store plain dicts whether loaded from cache or freshly fit.
    history_vola28_MPL28.append(model_history if isinstance(model_history, dict) else model_history.history)
El archivo 'history_vola28_MPL28_model_0.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola28_MPL28_model_1.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola28_MPL28_model_2.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola28_MPL28_model_3.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola28_MPL28_model_4.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola28_MPL28_model_5.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola28_MPL28_model_6.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola28_MPL28_model_7.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola28_MPL28_model_8.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola28_MPL28_model_9.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola28_MPL28_model_10.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola28_MPL28_model_11.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola28_MPL28_model_12.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola28_MPL28_model_13.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola28_MPL28_model_14.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola28_MPL28_model_15.joblib' ya existe. Se ha cargado el historial del entrenamiento.
En este caso, el parámetro verbose=2 indica que se mostrará una línea por cada época durante el entrenamiento. Los valores posibles son: 0 para no mostrar información, 1 para visualizar una barra de progreso y 2 para ver un registro por época.
Además, las muestras se mezclan de manera aleatoria antes de cada época, gracias a la opción shuffle=True.
Una vez completado el entrenamiento, se realizan predicciones sobre el precio del Bitcoin utilizando los mejores modelos guardados. Asimismo, se evalúa la calidad del ajuste mediante métricas de medición como SSE, MAPE, MAD, MSD y R2.
import os
import re
from tensorflow.keras.models import load_model
import numpy as np

# Scan the checkpoint directory and keep the file whose filename records the
# lowest validation loss (ModelCheckpoint encoded epoch and val_loss in it).
model_dir = 'keras_models'
files = os.listdir(model_dir)
pattern = r"PRSA_data_vola28_MLP28_weights\.(\d+)-([\d\.]+)\.keras"

best_val_loss = float('inf')
best_model_file = None
best_model28_vola28 = None

for file in files:
    match = re.match(pattern, file)
    if not match:
        continue
    epoch = int(match.group(1))       # epoch number (informational)
    val_loss = float(match.group(2))  # validation loss at that epoch
    if val_loss < best_val_loss:
        best_val_loss, best_model_file = val_loss, file

if best_model_file:
    best_model_path = os.path.join(model_dir, best_model_file)
    print(f"Cargando el mejor modelo: {best_model_file} con val_loss: {best_val_loss}")
    best_model28_vola28 = load_model(best_model_path)
    if best_model28_vola28 is None:
        print("Error: No se pudo cargar el mejor modelo.")
else:
    print("No se encontraron archivos de modelos que coincidan con el patrón.")
Cargando el mejor modelo: PRSA_data_vola28_MLP28_weights.34-0.0006.keras con val_loss: 0.0006
A continuación estimamos las predicciones del modelo MLP para la época en donde la función de pérdida alcanza su valor más bajo.
# Predict on every split with the best checkpointed model; np.squeeze drops
# the trailing singleton dimension of the (n, 1) network output.
if best_model28_vola28 is None:
    print("No se puede realizar predicciones porque el mejor modelo no se ha cargado.")
else:
    train_preds_vola28_MLP28 = np.squeeze(best_model28_vola28.predict(X_train_vola28_28))
    val_preds_vola28_MLP28 = np.squeeze(best_model28_vola28.predict(X_val_vola28_28))
    test_preds_vola28_MLP28 = np.squeeze(best_model28_vola28.predict(X_test_vola28_28))
    print("Predicciones de entrenamiento:", train_preds_vola28_MLP28)
    print("Predicciones de validación:", val_preds_vola28_MLP28)
    print("Predicciones de prueba:", test_preds_vola28_MLP28)
153/153 [==============================] - 0s 200us/step
1/1 [==============================] - 0s 10ms/step
1/1 [==============================] - 0s 10ms/step
Predicciones de entrenamiento: [0.00336624 0.00336624 0.00336624 ... 0.02475141 0.02464514 0.02441207]
Predicciones de validación: [0.02440551 0.02431989 0.02286901 0.0238506 0.02442062 0.02440939
0.02444185 0.0244737 0.02491317 0.02518687 0.02484467 0.02462438
0.02736116 0.02804216 0.02846966 0.02799868 0.02791543 0.02764187
0.02774693 0.02795559 0.02827209 0.0280161 0.02869812 0.0290857
0.02864251 0.02835 0.02959434 0.02971701]
Predicciones de prueba: [0.02979818 0.02937397 0.02874593 0.02782186 0.02697697 0.02690599
0.02686115 0.02723288 0.02437188 0.023582 0.02368518 0.02436904
0.02181569 0.02108483 0.02043468 0.02075481 0.0208072 0.02133963
0.02079738 0.02033143 0.02040918 0.02048375 0.01838858 0.01759318
0.01770653 0.0179197 0.01733317 0.01711683]
A continuación se indexan los parámetros sobre la función data_plot() para un horizonte de 28 días (\(\tau = 28\)).
# Split the 28-day volatility series into the train/val/test segments used for plotting (τ = 28).
data_train_plot_vola28_28, data_val_plot_vola28_28 , data_test_plot_vola28_28 = data_plot(df_1_st['Volatilidad_28'], 28)
Datos de entrenamiento:
4998 0.0000
4997 0.0000
4996 0.0000
4995 0.0000
4994 0.0000
...
60 0.0285
59 0.0283
58 0.0297
57 0.0297
56 0.0297
Name: Volatilidad_28, Length: 4943, dtype: float64
Datos de validación:
55 0.0290
54 0.0288
53 0.0274
52 0.0269
51 0.0269
50 0.0269
49 0.0269
48 0.0232
47 0.0232
46 0.0238
45 0.0242
44 0.0205
43 0.0205
42 0.0197
41 0.0203
40 0.0203
39 0.0213
38 0.0197
37 0.0197
36 0.0199
35 0.0198
34 0.0167
33 0.0167
32 0.0170
31 0.0172
30 0.0159
29 0.0160
28 0.0160
Name: Volatilidad_28, dtype: float64
Datos de prueba:
27 0.0177
26 0.0188
25 0.0243
24 0.0252
23 0.0252
22 0.0252
21 0.0248
20 0.0277
19 0.0318
18 0.0320
17 0.0319
16 0.0315
15 0.0316
14 0.0316
13 0.0320
12 0.0321
11 0.0316
10 0.0323
9 0.0331
8 0.0355
7 0.0362
6 0.0363
5 0.0403
4 0.0434
3 0.0440
2 0.0444
1 0.0444
0 0.0450
Name: Volatilidad_28, dtype: float64
# Plot the last 100 training points plus the validation/test series against the MLP predictions.
plot_model(data_train_plot_vola28_28[-100:], data_val_plot_vola28_28, data_test_plot_vola28_28, val_preds_vola28_MLP28, test_preds_vola28_MLP28, "Predicciones usando Perceptrón Multicapa (MLP) para un horizonte de 28 días")
A continuación realizamos un análisis sobre los residuales y rendimiento de nuestro modelo con relación a nuestro conjunto de validación(rendimiento) y test (rendimiento y residuales).
Conjunto de datos de Entrenamiento
A continuación se realiza el análisis de los residuales para el conjunto de entrenamiento.
# Residual diagnostics on the training split; returns the Ljung-Box and Jarque-Bera p-values.
ljung_box_pval_MLP_train28_vola28, jarque_bera_pval_MLP_train28_vola28 = diagnostic_plots(y_train_vola28_28, train_preds_vola28_MLP28)
Ljung-Box LB Statistic: 450.056271
Ljung-Box p-value: 0.000000
Se rechaza H0: hay autocorrelación en los residuales.
Jarque-Bera p-value: 0.000000
Se rechaza H0: los residuales no siguen una distribución normal.
# Fit metrics (SSE, MAPE, MAD, MSD, R2) on the training split, then attach the
# residual-diagnostic p-values as extra columns of the one-row table.
metrica_vola28_MLP_train28 = metricas(y_train_vola28_28, train_preds_vola28_MLP28)
row_label = 'MLP Entrenamiento Volatilidad ω = 28 y τ = 28'
metrica_vola28_MLP_train28.index = metrica_vola28_MLP_train28.index.map({0: row_label})
diagnostics = {
    'Ljung-Box p-value': ljung_box_pval_MLP_train28_vola28,
    'Jarque-Bera p-value': jarque_bera_pval_MLP_train28_vola28,
}
for column, p_value in diagnostics.items():
    metrica_vola28_MLP_train28[column] = pd.Series([p_value], index=metrica_vola28_MLP_train28.index)
metrica_vola28_MLP_train28
| SSE | MAPE | MAD | MSD | R2 | Ljung-Box p-value | Jarque-Bera p-value | |
|---|---|---|---|---|---|---|---|
| MLP Entrenamiento Volatilidad ω = 28 y τ = 28 | 0.6679 | 6.35% | 0.0 | 0.0 | 96.34% | 7.0124e-100 | 0.0 |
Conjunto de datos de Prueba:
# Residual diagnostics on the test split (autocorrelation and normality tests).
ljung_box_pvalMLP28_vola28, jarque_bera_pvalMLP28_vola28 = evaluate_residuals(data_test_plot_vola28_28, test_preds_vola28_MLP28)
Se rechaza H0: hay autocorrelación en los residuales.
No se rechaza H0: los residuales siguen una distribución normal.
# Fit metrics on the test split, plus the residual-diagnostic p-values.
metrica_MLP28_test_vola28 = metricas(y_test_vola28_28, test_preds_vola28_MLP28)
# FIX: the row label previously said "ω = 21", but this whole section analyses
# the 28-day volatility window (Volatilidad_28), so the label now says ω = 28.
metrica_MLP28_test_vola28.index = metrica_MLP28_test_vola28.index.map({0: 'MLP Prueba Volatilidad ω = 28 y τ = 28'})
metrica_MLP28_test_vola28['Ljung-Box p-value'] = pd.Series([ljung_box_pvalMLP28_vola28], index=metrica_MLP28_test_vola28.index)
metrica_MLP28_test_vola28['Jarque-Bera p-value'] = pd.Series([jarque_bera_pvalMLP28_vola28], index=metrica_MLP28_test_vola28.index)
metrica_MLP28_test_vola28
| SSE | MAPE | MAD | MSD | R2 | Ljung-Box p-value | Jarque-Bera p-value | |
|---|---|---|---|---|---|---|---|
| MLP Prueba Volatilidad ω = 21 y τ = 28 | 6.9863e-05 | 5.79% | 0.0 | 0.0 | 85.32% | 1.6950e-06 | 0.7195 |
Curva Runs vs Error/Score :
# Plot the validation-loss curves from the stored training histories (best model highlighted by the helper).
plot_best_model_validation_loss(history_vola28_MPL28)
Boxplot Errores Conjunto de Entrenamiento, Validación y Prueba :
# Boxplots of the residuals for the train, validation and test splits.
errores_plots(y_train_vola28_28, train_preds_vola28_MLP28, y_val_vola28_28, val_preds_vola28_MLP28, y_test_vola28_28, test_preds_vola28_MLP28)
De acuerdo con los resultados obtenidos del gráfico de Runs vs Error/Score y al compararlos con el gráfico boxplot de los residuales, se observa lo siguiente:
Conjunto de Entrenamiento: La media de los residuales se encuentra alejada del error/score correspondiente al val_loss de la época del mejor modelo.
Conjunto de Validación: La media de los residuales se encuentra cercana al error/score correspondiente al val_loss de la época del mejor modelo.
Conjunto de Prueba: La media de los residuales se encuentra alejada del error/score correspondiente al val_loss de la época del mejor modelo. De igual forma, se observa una alta variabilidad en los residuales.
Lo anterior es coherente con las métricas obtenidas para el modelo implementado.
Volatilidad ω = 28 (Volatilidad_28): Memoria a Corto y Largo Plazo (LSTM) #
Ya definimos los regresores (X) y la variable objetivo (y) para el proceso de entrenamiento y validación en la sección correspondiente al modelo de Perceptrones Multicapa a través de la función create_time_series_datasets(); sin embargo, ésta se utiliza para generar arreglos 2D de forma (número de muestras, número de pasos de tiempo). Dado que la entrada a las capas de una RNN debe ser de forma (número de muestras, número de pasos de tiempo, número de características por paso de tiempo), procedemos con la definición de la función change_dimension_lstm() para realizar la transformación de 2D a 3D.
Horizonte de 7 días (\(\tau=7\))#
Indexación de parámetros para cambio de dimensión de X de 2D a 3D.
# Reshape the 2D windowed arrays into the 3D (samples, timesteps, features) layout expected by LSTM layers.
X_train_vola28_lstm_7, X_val_vola28_lstm_7, X_test_vola28_lstm_7 = change_dimension_lstm(X_train_vola28_7, X_val_vola28_7, X_test_vola28_7)
Shape of 3D arrays X: (4971, 7, 1) (7, 7, 1) (7, 7, 1)
A continuación, se lleva a cabo la búsqueda de los hiperparámetros que optimizan nuestro modelo utilizando Keras y GridSearch.
from sklearn.metrics import make_scorer, mean_absolute_error

os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'  # Silence TensorFlow info/warning logs.

def create_lstm_model(optimizer, activation):
    """Build a two-layer LSTM regressor for 7-step univariate windows.

    Parameters
    ----------
    optimizer : str or keras Optimizer
        Optimizer used to compile the model (grid-searched).
    activation : str
        Activation for the LSTM layers (grid-searched). FIX: the original
        function accepted this argument but never used it, so every grid
        candidate built an identical model; it is now wired into both LSTM
        layers.
    """
    input_layer_lstm = Input(shape=(7, 1), dtype='float32')
    # First LSTM returns the full sequence so it can feed the second LSTM.
    lstm_layer1 = LSTM(64, return_sequences=True, activation=activation)(input_layer_lstm)
    lstm_layer2 = LSTM(32, return_sequences=False, activation=activation)(lstm_layer1)
    dropout_layer_lstm = Dropout(0.2)(lstm_layer2)
    # Linear output: single-value regression target.
    output_layer_lstm = Dense(1, activation='linear')(dropout_layer_lstm)
    ts_model_lstm = Model(inputs=input_layer_lstm, outputs=output_layer_lstm)
    ts_model_lstm.compile(loss='mean_absolute_error', optimizer=optimizer)
    return ts_model_lstm

# Hyper-parameter grid (the commented lists show the full search space).
param_grid = {
    'activation': ['relu'],   # e.g. ['relu', 'tanh', 'sigmoid']
    'epochs': [20],           # e.g. [20, 50, 100, 150]
    'optimizer': ['SGD'],     # e.g. ['SGD', 'RMSprop', 'Adam']
}

# Grid search with 5-fold CV scored by MAE.
# FIX: make_scorer(mean_absolute_error) without greater_is_better=False makes
# GridSearchCV MAXIMIZE the error and pick the WORST candidate; MAE must be
# registered as a loss (best_score_ is therefore reported negated).
scoring = make_scorer(mean_absolute_error, greater_is_better=False)
model = KerasRegressor(build_fn=create_lstm_model, epochs=20, batch_size=16, verbose=0)
grid = GridSearchCV(estimator=model, param_grid=param_grid, cv=KFold(n_splits=5, shuffle=True),
                    scoring=scoring, n_jobs=-1, verbose=2)
grid_result = grid.fit(X_train_vola28_lstm_7, y_train_vola28_7)

# Grid-search results.
print(f"Mejor función de activación: {grid_result.best_params_['activation']}")
print(f"Mejor número de epocas: {grid_result.best_params_['epochs']}")
print(f"Mejor Longitud de paso μ (optimizer): {grid_result.best_params_['optimizer']}")
print(f"Mejor Puntuación: {grid_result.best_score_}")
Fitting 5 folds for each of 1 candidates, totalling 5 fits
[CV] END ..........activation=relu, epochs=20, optimizer=SGD; total time= 23.7s
[CV] END ..........activation=relu, epochs=20, optimizer=SGD; total time= 24.1s
[CV] END ..........activation=relu, epochs=20, optimizer=SGD; total time= 24.4s
[CV] END ..........activation=relu, epochs=20, optimizer=SGD; total time= 24.5s
[CV] END ..........activation=relu, epochs=20, optimizer=SGD; total time= 24.6s
Mejor función de activación: relu
Mejor número de epocas: 20
Mejor Longitud de paso μ (optimizer): SGD
Mejor Puntuación: 0.017751403726695182
El modelo de predicción de series temporales se basa en una red neuronal recurrente que incluye una capa de entrada que se conecta a una capa LSTM. Esta capa LSTM considera 7 pasos temporales, equivalentes al número de datos históricos. La salida de la LSTM se genera únicamente a partir del último paso temporal. Cada uno de los n pasos temporales definidos en el shape (n,) cuenta con 32 neuronas ocultas.
La cantidad de pasos temporales (timesteps) y 1 indica el número de características (features) por cada paso temporal. Se utiliza return_sequences=True cuando es necesario enviar la secuencia completa de salidas a la capa siguiente, que puede ser otra capa recurrente, para este caso otra capa LSTM.
La salida de LSTM pasa a una capa de exclusión que elimina aleatoriamente el 20%, 40%, 60% y 80% de la entrada antes de pasar a la capa de salida, que tiene una única neurona oculta con una función de activación lineal
Indexación de parámetros de la función build_models_lstm con base en lo indicado en el numeral 3 del parcial práctico.
# Build the LSTM candidate grid: same neurons/dropout combinations as the MLP
# section, compiled with SGD (the optimizer selected by the grid search above).
input_shape7 = 7
neurons_list = [10, 100, 1000, 10000]
dropout_rates = [0.2, 0.4, 0.6, 0.8]
models_LSTM7_vola28_7 = build_models_lstm(input_shape7, neurons_list, dropout_rates ,'SGD')
Model: "model_749"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_750 (InputLayer) [(None, 7, 1)] 0
lstm_682 (LSTM) (None, 7, 64) 16896
lstm_683 (LSTM) (None, 32) 12416
dropout_749 (Dropout) (None, 32) 0
dense_1973 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10 neuronas y dropout 0.2:
Model: "model_749"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_750 (InputLayer) [(None, 7, 1)] 0
lstm_682 (LSTM) (None, 7, 64) 16896
lstm_683 (LSTM) (None, 32) 12416
dropout_749 (Dropout) (None, 32) 0
dense_1973 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_750"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_751 (InputLayer) [(None, 7, 1)] 0
lstm_684 (LSTM) (None, 7, 64) 16896
lstm_685 (LSTM) (None, 32) 12416
dropout_750 (Dropout) (None, 32) 0
dense_1974 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10 neuronas y dropout 0.4:
Model: "model_750"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_751 (InputLayer) [(None, 7, 1)] 0
lstm_684 (LSTM) (None, 7, 64) 16896
lstm_685 (LSTM) (None, 32) 12416
dropout_750 (Dropout) (None, 32) 0
dense_1974 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_751"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_752 (InputLayer) [(None, 7, 1)] 0
lstm_686 (LSTM) (None, 7, 64) 16896
lstm_687 (LSTM) (None, 32) 12416
dropout_751 (Dropout) (None, 32) 0
dense_1975 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10 neuronas y dropout 0.6:
Model: "model_751"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_752 (InputLayer) [(None, 7, 1)] 0
lstm_686 (LSTM) (None, 7, 64) 16896
lstm_687 (LSTM) (None, 32) 12416
dropout_751 (Dropout) (None, 32) 0
dense_1975 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_752"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_753 (InputLayer) [(None, 7, 1)] 0
lstm_688 (LSTM) (None, 7, 64) 16896
lstm_689 (LSTM) (None, 32) 12416
dropout_752 (Dropout) (None, 32) 0
dense_1976 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10 neuronas y dropout 0.8:
Model: "model_752"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_753 (InputLayer) [(None, 7, 1)] 0
lstm_688 (LSTM) (None, 7, 64) 16896
lstm_689 (LSTM) (None, 32) 12416
dropout_752 (Dropout) (None, 32) 0
dense_1976 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_753"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_754 (InputLayer) [(None, 7, 1)] 0
lstm_690 (LSTM) (None, 7, 64) 16896
lstm_691 (LSTM) (None, 32) 12416
dropout_753 (Dropout) (None, 32) 0
dense_1977 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 100 neuronas y dropout 0.2:
Model: "model_753"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_754 (InputLayer) [(None, 7, 1)] 0
lstm_690 (LSTM) (None, 7, 64) 16896
lstm_691 (LSTM) (None, 32) 12416
dropout_753 (Dropout) (None, 32) 0
dense_1977 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_754"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_755 (InputLayer) [(None, 7, 1)] 0
lstm_692 (LSTM) (None, 7, 64) 16896
lstm_693 (LSTM) (None, 32) 12416
dropout_754 (Dropout) (None, 32) 0
dense_1978 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 100 neuronas y dropout 0.4:
Model: "model_754"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_755 (InputLayer) [(None, 7, 1)] 0
lstm_692 (LSTM) (None, 7, 64) 16896
lstm_693 (LSTM) (None, 32) 12416
dropout_754 (Dropout) (None, 32) 0
dense_1978 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_755"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_756 (InputLayer) [(None, 7, 1)] 0
lstm_694 (LSTM) (None, 7, 64) 16896
lstm_695 (LSTM) (None, 32) 12416
dropout_755 (Dropout) (None, 32) 0
dense_1979 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 100 neuronas y dropout 0.6:
Model: "model_755"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_756 (InputLayer) [(None, 7, 1)] 0
lstm_694 (LSTM) (None, 7, 64) 16896
lstm_695 (LSTM) (None, 32) 12416
dropout_755 (Dropout) (None, 32) 0
dense_1979 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_756"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_757 (InputLayer) [(None, 7, 1)] 0
lstm_696 (LSTM) (None, 7, 64) 16896
lstm_697 (LSTM) (None, 32) 12416
dropout_756 (Dropout) (None, 32) 0
dense_1980 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 100 neuronas y dropout 0.8:
Model: "model_756"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_757 (InputLayer) [(None, 7, 1)] 0
lstm_696 (LSTM) (None, 7, 64) 16896
lstm_697 (LSTM) (None, 32) 12416
dropout_756 (Dropout) (None, 32) 0
dense_1980 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_757"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_758 (InputLayer) [(None, 7, 1)] 0
lstm_698 (LSTM) (None, 7, 64) 16896
lstm_699 (LSTM) (None, 32) 12416
dropout_757 (Dropout) (None, 32) 0
dense_1981 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 1000 neuronas y dropout 0.2:
Model: "model_757"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_758 (InputLayer) [(None, 7, 1)] 0
lstm_698 (LSTM) (None, 7, 64) 16896
lstm_699 (LSTM) (None, 32) 12416
dropout_757 (Dropout) (None, 32) 0
dense_1981 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_758"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_759 (InputLayer) [(None, 7, 1)] 0
lstm_700 (LSTM) (None, 7, 64) 16896
lstm_701 (LSTM) (None, 32) 12416
dropout_758 (Dropout) (None, 32) 0
dense_1982 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 1000 neuronas y dropout 0.4:
Model: "model_758"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_759 (InputLayer) [(None, 7, 1)] 0
lstm_700 (LSTM) (None, 7, 64) 16896
lstm_701 (LSTM) (None, 32) 12416
dropout_758 (Dropout) (None, 32) 0
dense_1982 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_759"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_760 (InputLayer) [(None, 7, 1)] 0
lstm_702 (LSTM) (None, 7, 64) 16896
lstm_703 (LSTM) (None, 32) 12416
dropout_759 (Dropout) (None, 32) 0
dense_1983 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 1000 neuronas y dropout 0.6:
Model: "model_759"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_760 (InputLayer) [(None, 7, 1)] 0
lstm_702 (LSTM) (None, 7, 64) 16896
lstm_703 (LSTM) (None, 32) 12416
dropout_759 (Dropout) (None, 32) 0
dense_1983 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_760"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_761 (InputLayer) [(None, 7, 1)] 0
lstm_704 (LSTM) (None, 7, 64) 16896
lstm_705 (LSTM) (None, 32) 12416
dropout_760 (Dropout) (None, 32) 0
dense_1984 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 1000 neuronas y dropout 0.8:
Model: "model_760"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_761 (InputLayer) [(None, 7, 1)] 0
lstm_704 (LSTM) (None, 7, 64) 16896
lstm_705 (LSTM) (None, 32) 12416
dropout_760 (Dropout) (None, 32) 0
dense_1984 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_761"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_762 (InputLayer) [(None, 7, 1)] 0
lstm_706 (LSTM) (None, 7, 64) 16896
lstm_707 (LSTM) (None, 32) 12416
dropout_761 (Dropout) (None, 32) 0
dense_1985 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10000 neuronas y dropout 0.2:
Model: "model_761"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_762 (InputLayer) [(None, 7, 1)] 0
lstm_706 (LSTM) (None, 7, 64) 16896
lstm_707 (LSTM) (None, 32) 12416
dropout_761 (Dropout) (None, 32) 0
dense_1985 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_762"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_763 (InputLayer) [(None, 7, 1)] 0
lstm_708 (LSTM) (None, 7, 64) 16896
lstm_709 (LSTM) (None, 32) 12416
dropout_762 (Dropout) (None, 32) 0
dense_1986 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10000 neuronas y dropout 0.4:
Model: "model_762"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_763 (InputLayer) [(None, 7, 1)] 0
lstm_708 (LSTM) (None, 7, 64) 16896
lstm_709 (LSTM) (None, 32) 12416
dropout_762 (Dropout) (None, 32) 0
dense_1986 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_763"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_764 (InputLayer) [(None, 7, 1)] 0
lstm_710 (LSTM) (None, 7, 64) 16896
lstm_711 (LSTM) (None, 32) 12416
dropout_763 (Dropout) (None, 32) 0
dense_1987 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10000 neuronas y dropout 0.6:
Model: "model_763"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_764 (InputLayer) [(None, 7, 1)] 0
lstm_710 (LSTM) (None, 7, 64) 16896
lstm_711 (LSTM) (None, 32) 12416
dropout_763 (Dropout) (None, 32) 0
dense_1987 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_764"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_765 (InputLayer) [(None, 7, 1)] 0
lstm_712 (LSTM) (None, 7, 64) 16896
lstm_713 (LSTM) (None, 32) 12416
dropout_764 (Dropout) (None, 32) 0
dense_1988 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10000 neuronas y dropout 0.8:
Model: "model_764"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_765 (InputLayer) [(None, 7, 1)] 0
lstm_712 (LSTM) (None, 7, 64) 16896
lstm_713 (LSTM) (None, 32) 12416
dropout_764 (Dropout) (None, 32) 0
dense_1988 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Para entrenar el modelo, se utiliza la función fit() en el objeto del modelo, proporcionándole como argumentos X_train y y_train. El proceso de entrenamiento se lleva a cabo a lo largo de un número específico de épocas. Además, el parámetro batch_size especifica cuántas muestras del conjunto de entrenamiento se usarán en cada iteración de retropropagación (backpropagation).
El conjunto de datos de validación se utiliza para evaluar el rendimiento del modelo al finalizar cada época. Se emplea un objeto ModelCheckpoint que monitorea la función de pérdida en el conjunto de validación y almacena el modelo correspondiente a la época en la que esta función alcanza su valor más bajo.
Se define val_loss como el valor de la función de coste para los datos de validación cruzada y loss como el valor de la función de coste para los datos de entrenamiento. save_freq='epoch' indica que el modelo se evaluará y potencialmente se guardará después de cada época.
# Checkpoint callback: at the end of every epoch, persist the model only if
# its validation loss is the lowest observed so far.
from tensorflow.keras.callbacks import ModelCheckpoint

# The file-name template embeds the epoch number and the validation loss.
save_weights = os.path.join('keras_models', 'PRSA_data_vola28_LSTM7_weights.{epoch:02d}-{val_loss:.4f}.keras')
save_best7_lstm_vola28 = ModelCheckpoint(
    save_weights,
    monitor='val_loss',       # quantity watched after each epoch
    verbose=2,
    save_best_only=True,      # overwrite only on improvement
    save_weights_only=False,  # store the full model, not just the weights
    mode='min',               # lower val_loss is better
    save_freq='epoch',
)
A continuación se itera sobre cada uno de los modelos del objeto models_LSTM7.
import os
from joblib import dump, load

# Train every candidate LSTM once and cache each training history on disk, so
# re-running the notebook skips the (expensive) fits that already completed.
history_vola28_LSTM7 = []

for i, model in enumerate(models_LSTM7_vola28_7):
    filename = f'history_vola28_LSTM7_model_{i}.joblib'
    if os.path.exists(filename):
        # Cached history found: load it instead of re-training.
        model_history = load(filename)
        # BUG FIX: the messages printed the literal text '(unknown)' instead of
        # interpolating {filename} (see the logged output, which shows names).
        print(f"El archivo '{filename}' ya existe. Se ha cargado el historial del entrenamiento.")
    else:
        # Train; the ModelCheckpoint callback keeps the best-epoch model.
        model_history = model.fit(x=X_train_vola28_lstm_7, y=y_train_vola28_7, batch_size=16, epochs=20,
                                  verbose=2, callbacks=[save_best7_lstm_vola28], validation_data=(X_val_vola28_lstm_7, y_val_vola28_7),
                                  shuffle=True)
        dump(model_history.history, filename)
        print(f"El entrenamiento del modelo {i + 1} se ha completado y el historial ha sido guardado en '{filename}'.")
    # Cached histories load as plain dicts; a fresh fit returns a History object.
    history_vola28_LSTM7.append(model_history if isinstance(model_history, dict) else model_history.history)
El archivo 'history_vola28_LSTM7_model_0.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola28_LSTM7_model_1.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola28_LSTM7_model_2.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola28_LSTM7_model_3.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola28_LSTM7_model_4.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola28_LSTM7_model_5.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola28_LSTM7_model_6.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola28_LSTM7_model_7.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola28_LSTM7_model_8.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola28_LSTM7_model_9.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola28_LSTM7_model_10.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola28_LSTM7_model_11.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola28_LSTM7_model_12.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola28_LSTM7_model_13.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola28_LSTM7_model_14.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola28_LSTM7_model_15.joblib' ya existe. Se ha cargado el historial del entrenamiento.
En este caso, el parámetro verbose=2 indica que se mostrará una línea por cada época durante el entrenamiento. Los valores posibles son: 0 para no mostrar información, 1 para visualizar una barra de progreso y 2 para ver un registro por época.
Además, las muestras se mezclan de manera aleatoria antes de cada época, gracias a la opción shuffle=True.
Una vez completado el entrenamiento, se realizan predicciones sobre el Precio del Bitcoin utilizando los mejores modelos guardados. Las predicciones generadas, que corresponden al Precio del Bitcoin escalado, son transformadas inversamente para recuperar los valores originales. Asimismo, se evalúa la calidad del ajuste mediante el uso de métricas de medición como SSE, MAPE, MAD, MSD y R2.
import os
import re
from tensorflow.keras.models import load_model
import numpy as np

# Scan the checkpoint directory and load the saved model whose file name
# encodes the lowest validation loss (as written by ModelCheckpoint).
model_dir = 'keras_models'
files = os.listdir(model_dir)
# File names look like: PRSA_data_vola28_LSTM7_weights.<epoch>-<val_loss>.keras
pattern = r"PRSA_data_vola28_LSTM7_weights\.(\d+)-([\d\.]+)\.keras"

best_val_loss = float('inf')
best_model_file = None
best7_lstm_vola28 = None

for file in files:
    match = re.match(pattern, file)
    if match:
        epoch = int(match.group(1))       # epoch number (informational only)
        val_loss = float(match.group(2))  # validation loss parsed from the name
        # Keep the file with the smallest validation loss seen so far.
        if val_loss < best_val_loss:
            best_val_loss = val_loss
            best_model_file = file

if best_model_file:
    best_model_path = os.path.join(model_dir, best_model_file)
    print(f"Cargando el mejor modelo: {best_model_file} con val_loss: {best_val_loss}")
    best7_lstm_vola28 = load_model(best_model_path)
    # BUG FIX: the original tested `best7_lstm_vola21` (a variable from a
    # different experiment) instead of the model loaded just above.
    if best7_lstm_vola28 is None:
        print("Error: No se pudo cargar el mejor modelo.")
else:
    print("No se encontraron archivos de modelos que coincidan con el patrón.")
Cargando el mejor modelo: PRSA_data_vola28_LSTM7_weights.20-0.0010.keras con val_loss: 0.001
A continuación estimamos las predicciones del modelo LSTM para la época en donde la función de pérdida alcanza su valor más bajo.
# Predict on the train/validation/test splits with the best checkpointed model.
if best7_lstm_vola28 is None:
    print("No se puede realizar predicciones porque el mejor modelo no se ha cargado.")
else:
    # One forward pass per split; np.squeeze drops the trailing (..., 1) axis.
    train_preds_vola28_LSTM7 = np.squeeze(best7_lstm_vola28.predict(X_train_vola28_lstm_7))
    val_preds_vola28_LSTM7 = np.squeeze(best7_lstm_vola28.predict(X_val_vola28_lstm_7))
    test_preds_vola28_LSTM7 = np.squeeze(best7_lstm_vola28.predict(X_test_vola28_lstm_7))

    # Show the (scaled) predictions for each split.
    print("Predicciones de Entrenamiento:", train_preds_vola28_LSTM7)
    print("Predicciones de validación:", val_preds_vola28_LSTM7)
    print("Predicciones de prueba:", test_preds_vola28_LSTM7)
156/156 [==============================] - 1s 872us/step
1/1 [==============================] - 0s 10ms/step
1/1 [==============================] - 0s 10ms/step
Predicciones de Entrenamiento: [0.00181503 0.00181503 0.00181503 ... 0.02586541 0.02572749 0.02568164]
Predicciones de validación: [0.02524202 0.0279711 0.03196964 0.03217679 0.03205559 0.03161073
0.03162515]
Predicciones de prueba: [0.03162203 0.03194185 0.0320471 0.03164462 0.03226221 0.03305304
0.03537688]
# Plot the last 100 training points together with the validation/test series
# and the LSTM predictions for the 7-day horizon.
# NOTE(review): `plot_model` here is a notebook helper, not keras.utils.plot_model.
plot_model(data_train_plot_vola28[-100:], data_val_plot_vola28, data_test_plot_vola28, val_preds_vola28_LSTM7, test_preds_vola28_LSTM7, "Predicciones usando Memoria a Corto y Largo Paso (LSTM) para un horizonte de 7 días")
A continuación realizamos un análisis sobre los residuales y rendimiento de nuestro modelo con relación a nuestro conjunto de Entrenamiento(rendimiento) y test (rendimiento y residuales).
Conjunto de datos de Entrenamiento
A continuación se realiza el análisis de los residuales para el conjunto de entrenamiento.
# Residual diagnostics on the training set: Ljung-Box (autocorrelation) and
# Jarque-Bera (normality) tests; both p-values are returned for the table below.
ljung_box_pval_LSTM_train7_vola28, jarque_bera_pval_LSTM_train7_vola28 = diagnostic_plots(y_train_vola28_7, train_preds_vola28_LSTM7)
Ljung-Box LB Statistic: 99.385388
Ljung-Box p-value: 0.000000
Se rechaza H0: hay autocorrelación en los residuales.
Jarque-Bera p-value: 0.000000
Se rechaza H0: los residuales no siguen una distribución normal.
# Compute the error metrics (SSE, MAPE, MAD, MSD, R2) on the training set and
# attach the residual-test p-values to the resulting table.
metrica_vola28_LSTM_train = metricas(y_train_vola28_7,train_preds_vola28_LSTM7)
metrica_vola28_LSTM_train.index = metrica_vola28_LSTM_train.index.map({0: 'LSTM Entrenamiento Volatilidad ω = 28 y τ = 7'})
metrica_vola28_LSTM_train['Ljung-Box p-value'] = pd.Series([ljung_box_pval_LSTM_train7_vola28], index=metrica_vola28_LSTM_train.index)
metrica_vola28_LSTM_train['Jarque-Bera p-value'] = pd.Series([jarque_bera_pval_LSTM_train7_vola28], index=metrica_vola28_LSTM_train.index)
metrica_vola28_LSTM_train
| SSE | MAPE | MAD | MSD | R2 | Ljung-Box p-value | Jarque-Bera p-value | |
|---|---|---|---|---|---|---|---|
| LSTM Entrenamiento Volatilidad ω = 28 y τ = 7 | 0.6949 | 5.1% | 0.0 | 0.0 | 96.2% | 2.0785e-23 | 0.0 |
Conjunto de datos de Prueba:
# Residual tests on the test set (Ljung-Box independence, Jarque-Bera normality).
ljung_box_pvalLSTM7_vola28, jarque_bera_pvalLSTM7_vola28 = evaluate_residuals(data_test_plot_vola28_7, test_preds_vola28_LSTM7)
No se rechaza H0: los residuales son independientes (no correlacionados).
No se rechaza H0: los residuales siguen una distribución normal.
# Compute the error metrics (SSE, MAPE, MAD, MSD, R2) on the test set and
# attach the residual-test p-values to the resulting table.
metrica_LSTM_test_vola28 = metricas(y_test_vola28_7,test_preds_vola28_LSTM7)
metrica_LSTM_test_vola28.index = metrica_LSTM_test_vola28.index.map({0: 'LSTM Prueba Volatilidad ω = 28 y τ = 7'})
metrica_LSTM_test_vola28['Ljung-Box p-value'] = pd.Series([ljung_box_pvalLSTM7_vola28], index=metrica_LSTM_test_vola28.index)
metrica_LSTM_test_vola28['Jarque-Bera p-value'] = pd.Series([jarque_bera_pvalLSTM7_vola28], index=metrica_LSTM_test_vola28.index)
metrica_LSTM_test_vola28
| SSE | MAPE | MAD | MSD | R2 | Ljung-Box p-value | Jarque-Bera p-value | |
|---|---|---|---|---|---|---|---|
| LSTM Prueba Volatilidad ω = 28 y τ = 7 | 8.2883e-06 | 2.37% | 0.0 | 0.0 | 60.11% | 0.2107 | 0.5019 |
Curva Runs vs Error/Score :
# Plot the per-epoch validation-loss curve from the stored training histories
# (the "Runs vs Error/Score" figure referenced in the text).
plot_best_model_validation_loss(history_vola28_LSTM7)
Boxplot Errores Conjunto de Entrenamiento, Validación y Prueba :
# Boxplots of the residuals (actual minus predicted) for the training,
# validation and test sets.
errores_plots(y_train_vola28_7, train_preds_vola28_LSTM7, y_val_vola28_7, val_preds_vola28_LSTM7, y_test_vola28_7, test_preds_vola28_LSTM7)
De acuerdo a los resultados obtenidos del gráfico de Run vs Error/Score y al compararlo con el gráfico boxplot de los residuales se observa lo siguiente:
Conjunto de Entrenamiento: La media de los residuales se encuentra alejada del error/score correspondiente al val_loss de la época del mejor modelo.
Conjunto de Validación: La media de los residuales se encuentra cercana del error/score correspondiente al val_loss de la época del mejor modelo.
Conjunto de Prueba: La media de los residuales se encuentra cercana del error/score correspondiente al val_loss de la época del mejor modelo.
Lo anterior es coherente con las métricas obtenidas para el modelo implementado.
Horizonte de 14 días (\(\tau=14\))#
Indexación de parámetros para cambio de dimensión de X de 2D a 3D.
# Reshape the 2D feature matrices (samples, timesteps) into the 3D form
# (samples, timesteps, 1) expected by Keras LSTM layers — see the printed
# shapes below: (4943, 14, 1), (14, 14, 1), (14, 14, 1).
X_train_vola28_lstm_14, X_val_vola28_lstm_14, X_test_vola28_lstm_14 = change_dimension_lstm(X_train_vola28_14, X_val_vola28_14, X_test_vola28_14)
Shape of 3D arrays X: (4943, 14, 1) (14, 14, 1) (14, 14, 1)
A continuación, se lleva a cabo la búsqueda de los hiperparámetros que optimizan nuestro modelo utilizando Keras y GridSearch.
from sklearn.metrics import make_scorer, mean_absolute_error

os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'  # silence TensorFlow info/warning logs


def create_lstm_model(optimizer, activation):
    """Build and compile the two-layer LSTM used in the grid search.

    Parameters
    ----------
    optimizer : str
        Optimizer name passed straight to ``compile``.
    activation : str
        NOTE(review): accepted so GridSearchCV can pass it, but it is NOT
        wired into any layer — the LSTMs keep their default activation and
        the output layer is explicitly 'linear'. Kept as-is to preserve the
        reported results; wire it into the layers if the search over
        activations is actually intended.
    """
    input_layer_lstm = Input(shape=(14, 1), dtype='float32')
    lstm_layer1 = LSTM(64, return_sequences=True)(input_layer_lstm)  # full sequence to the next LSTM
    lstm_layer2 = LSTM(32, return_sequences=False)(lstm_layer1)      # last timestep only
    dropout_layer_lstm = Dropout(0.2)(lstm_layer2)
    output_layer_lstm = Dense(1, activation='linear')(dropout_layer_lstm)
    ts_model_lstm = Model(inputs=input_layer_lstm, outputs=output_layer_lstm)
    ts_model_lstm.compile(loss='mean_absolute_error', optimizer=optimizer)
    return ts_model_lstm


# Hyper-parameter grid (reduced; the fuller candidate lists are noted in-line).
param_grid = {
    'activation': ['relu'],  # candidates: ['relu', 'tanh', 'sigmoid']
    'epochs': [20],          # candidates: [20, 50, 100, 150]
    'optimizer': ['SGD'],    # candidates: ['SGD', 'RMSprop', 'Adam']
}

# BUG FIX: GridSearchCV maximizes the scorer. make_scorer(mean_absolute_error)
# with the default greater_is_better=True would select the candidate with the
# LARGEST error once the grid has more than one entry. With
# greater_is_better=False, best_score_ is reported negated (sklearn convention).
scoring = make_scorer(mean_absolute_error, greater_is_better=False)
model = KerasRegressor(build_fn=create_lstm_model, epochs=10, batch_size=16, verbose=0)
grid = GridSearchCV(estimator=model, param_grid=param_grid, cv=KFold(n_splits=5, shuffle=True), scoring=scoring, n_jobs=-1, verbose=2)
grid_result = grid.fit(X_train_vola28_lstm_14, y_train_vola28_14)

# Report the best hyper-parameters found by the search.
print(f"Mejor función de activación: {grid_result.best_params_['activation']}")
print(f"Mejor número de epocas: {grid_result.best_params_['epochs']}")
print(f"Mejor Longitud de paso μ (optimizer): {grid_result.best_params_['optimizer']}")
print(f"Mejor Puntuación: {grid_result.best_score_}")
Fitting 5 folds for each of 1 candidates, totalling 5 fits
[CV] END ..........activation=relu, epochs=20, optimizer=SGD; total time= 29.2s
[CV] END ..........activation=relu, epochs=20, optimizer=SGD; total time= 29.3s
[CV] END ..........activation=relu, epochs=20, optimizer=SGD; total time= 29.4s
[CV] END ..........activation=relu, epochs=20, optimizer=SGD; total time= 29.4s
[CV] END ..........activation=relu, epochs=20, optimizer=SGD; total time= 29.7s
Mejor función de activación: relu
Mejor número de epocas: 20
Mejor Longitud de paso μ (optimizer): SGD
Mejor Puntuación: 0.019221560638020996
El modelo de predicción de series temporales se basa en una red neuronal recurrente que incluye una capa de entrada que se conecta a una capa LSTM. Esta capa LSTM considera 14 pasos temporales, equivalentes al número de datos históricos. La salida de la LSTM se genera únicamente a partir del último paso temporal. Cada uno de los n pasos temporales definidos en el shape (n,) cuenta con 32 neuronas ocultas.
La cantidad de pasos temporales (timesteps) y 1 indica el número de características (features) por cada paso temporal. Se utiliza return_sequences=True cuando es necesario enviar la secuencia completa de salidas a la capa siguiente, que puede ser otra capa recurrente, para este caso otra capa LSTM.
La salida de LSTM pasa a una capa de exclusión que elimina aleatoriamente el 20%, 40%, 60% y 80% de la entrada antes de pasar a la capa de salida, que tiene una única neurona oculta con una función de activación lineal
La indexación de parámetros de la función build_models_lstm con base a lo indicado en el numeral 3 del parcial práctico.
# Build the 4x4 grid of LSTM models (neuron counts x dropout rates) for the
# 14-day horizon, all compiled with the SGD optimizer.
# NOTE(review): judging by the printed summaries, every built model keeps the
# fixed 64/32 LSTM layer sizes; the neuron counts may only label the variants —
# confirm against build_models_lstm.
input_shape14 = 14
neurons_list = [10, 100, 1000, 10000]
dropout_rates = [0.2, 0.4, 0.6, 0.8]
models_LSTM14_vola28 = build_models_lstm(input_shape14, neurons_list, dropout_rates, 'SGD')
Model: "model_766"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_767 (InputLayer) [(None, 14, 1)] 0
lstm_716 (LSTM) (None, 14, 64) 16896
lstm_717 (LSTM) (None, 32) 12416
dropout_766 (Dropout) (None, 32) 0
dense_1990 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10 neuronas y dropout 0.2:
Model: "model_766"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_767 (InputLayer) [(None, 14, 1)] 0
lstm_716 (LSTM) (None, 14, 64) 16896
lstm_717 (LSTM) (None, 32) 12416
dropout_766 (Dropout) (None, 32) 0
dense_1990 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_767"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_768 (InputLayer) [(None, 14, 1)] 0
lstm_718 (LSTM) (None, 14, 64) 16896
lstm_719 (LSTM) (None, 32) 12416
dropout_767 (Dropout) (None, 32) 0
dense_1991 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10 neuronas y dropout 0.4:
Model: "model_767"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_768 (InputLayer) [(None, 14, 1)] 0
lstm_718 (LSTM) (None, 14, 64) 16896
lstm_719 (LSTM) (None, 32) 12416
dropout_767 (Dropout) (None, 32) 0
dense_1991 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_768"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_769 (InputLayer) [(None, 14, 1)] 0
lstm_720 (LSTM) (None, 14, 64) 16896
lstm_721 (LSTM) (None, 32) 12416
dropout_768 (Dropout) (None, 32) 0
dense_1992 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10 neuronas y dropout 0.6:
Model: "model_768"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_769 (InputLayer) [(None, 14, 1)] 0
lstm_720 (LSTM) (None, 14, 64) 16896
lstm_721 (LSTM) (None, 32) 12416
dropout_768 (Dropout) (None, 32) 0
dense_1992 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_769"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_770 (InputLayer) [(None, 14, 1)] 0
lstm_722 (LSTM) (None, 14, 64) 16896
lstm_723 (LSTM) (None, 32) 12416
dropout_769 (Dropout) (None, 32) 0
dense_1993 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10 neuronas y dropout 0.8:
Model: "model_769"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_770 (InputLayer) [(None, 14, 1)] 0
lstm_722 (LSTM) (None, 14, 64) 16896
lstm_723 (LSTM) (None, 32) 12416
dropout_769 (Dropout) (None, 32) 0
dense_1993 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_770"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_771 (InputLayer) [(None, 14, 1)] 0
lstm_724 (LSTM) (None, 14, 64) 16896
lstm_725 (LSTM) (None, 32) 12416
dropout_770 (Dropout) (None, 32) 0
dense_1994 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 100 neuronas y dropout 0.2:
Model: "model_770"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_771 (InputLayer) [(None, 14, 1)] 0
lstm_724 (LSTM) (None, 14, 64) 16896
lstm_725 (LSTM) (None, 32) 12416
dropout_770 (Dropout) (None, 32) 0
dense_1994 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_771"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_772 (InputLayer) [(None, 14, 1)] 0
lstm_726 (LSTM) (None, 14, 64) 16896
lstm_727 (LSTM) (None, 32) 12416
dropout_771 (Dropout) (None, 32) 0
dense_1995 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 100 neuronas y dropout 0.4:
Model: "model_771"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_772 (InputLayer) [(None, 14, 1)] 0
lstm_726 (LSTM) (None, 14, 64) 16896
lstm_727 (LSTM) (None, 32) 12416
dropout_771 (Dropout) (None, 32) 0
dense_1995 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_772"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_773 (InputLayer) [(None, 14, 1)] 0
lstm_728 (LSTM) (None, 14, 64) 16896
lstm_729 (LSTM) (None, 32) 12416
dropout_772 (Dropout) (None, 32) 0
dense_1996 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 100 neuronas y dropout 0.6:
Model: "model_772"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_773 (InputLayer) [(None, 14, 1)] 0
lstm_728 (LSTM) (None, 14, 64) 16896
lstm_729 (LSTM) (None, 32) 12416
dropout_772 (Dropout) (None, 32) 0
dense_1996 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_773"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_774 (InputLayer) [(None, 14, 1)] 0
lstm_730 (LSTM) (None, 14, 64) 16896
lstm_731 (LSTM) (None, 32) 12416
dropout_773 (Dropout) (None, 32) 0
dense_1997 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 100 neuronas y dropout 0.8:
Model: "model_773"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_774 (InputLayer) [(None, 14, 1)] 0
lstm_730 (LSTM) (None, 14, 64) 16896
lstm_731 (LSTM) (None, 32) 12416
dropout_773 (Dropout) (None, 32) 0
dense_1997 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_774"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_775 (InputLayer) [(None, 14, 1)] 0
lstm_732 (LSTM) (None, 14, 64) 16896
lstm_733 (LSTM) (None, 32) 12416
dropout_774 (Dropout) (None, 32) 0
dense_1998 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 1000 neuronas y dropout 0.2:
Model: "model_774"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_775 (InputLayer) [(None, 14, 1)] 0
lstm_732 (LSTM) (None, 14, 64) 16896
lstm_733 (LSTM) (None, 32) 12416
dropout_774 (Dropout) (None, 32) 0
dense_1998 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_775"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_776 (InputLayer) [(None, 14, 1)] 0
lstm_734 (LSTM) (None, 14, 64) 16896
lstm_735 (LSTM) (None, 32) 12416
dropout_775 (Dropout) (None, 32) 0
dense_1999 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 1000 neuronas y dropout 0.4:
Model: "model_775"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_776 (InputLayer) [(None, 14, 1)] 0
lstm_734 (LSTM) (None, 14, 64) 16896
lstm_735 (LSTM) (None, 32) 12416
dropout_775 (Dropout) (None, 32) 0
dense_1999 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_776"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_777 (InputLayer) [(None, 14, 1)] 0
lstm_736 (LSTM) (None, 14, 64) 16896
lstm_737 (LSTM) (None, 32) 12416
dropout_776 (Dropout) (None, 32) 0
dense_2000 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 1000 neuronas y dropout 0.6:
Model: "model_776"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_777 (InputLayer) [(None, 14, 1)] 0
lstm_736 (LSTM) (None, 14, 64) 16896
lstm_737 (LSTM) (None, 32) 12416
dropout_776 (Dropout) (None, 32) 0
dense_2000 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_777"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_778 (InputLayer) [(None, 14, 1)] 0
lstm_738 (LSTM) (None, 14, 64) 16896
lstm_739 (LSTM) (None, 32) 12416
dropout_777 (Dropout) (None, 32) 0
dense_2001 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 1000 neuronas y dropout 0.8:
Model: "model_777"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_778 (InputLayer) [(None, 14, 1)] 0
lstm_738 (LSTM) (None, 14, 64) 16896
lstm_739 (LSTM) (None, 32) 12416
dropout_777 (Dropout) (None, 32) 0
dense_2001 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_778"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_779 (InputLayer) [(None, 14, 1)] 0
lstm_740 (LSTM) (None, 14, 64) 16896
lstm_741 (LSTM) (None, 32) 12416
dropout_778 (Dropout) (None, 32) 0
dense_2002 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10000 neuronas y dropout 0.2:
Model: "model_778"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_779 (InputLayer) [(None, 14, 1)] 0
lstm_740 (LSTM) (None, 14, 64) 16896
lstm_741 (LSTM) (None, 32) 12416
dropout_778 (Dropout) (None, 32) 0
dense_2002 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_779"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_780 (InputLayer) [(None, 14, 1)] 0
lstm_742 (LSTM) (None, 14, 64) 16896
lstm_743 (LSTM) (None, 32) 12416
dropout_779 (Dropout) (None, 32) 0
dense_2003 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10000 neuronas y dropout 0.4:
Model: "model_779"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_780 (InputLayer) [(None, 14, 1)] 0
lstm_742 (LSTM) (None, 14, 64) 16896
lstm_743 (LSTM) (None, 32) 12416
dropout_779 (Dropout) (None, 32) 0
dense_2003 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_780"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_781 (InputLayer) [(None, 14, 1)] 0
lstm_744 (LSTM) (None, 14, 64) 16896
lstm_745 (LSTM) (None, 32) 12416
dropout_780 (Dropout) (None, 32) 0
dense_2004 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10000 neuronas y dropout 0.6:
Model: "model_780"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_781 (InputLayer) [(None, 14, 1)] 0
lstm_744 (LSTM) (None, 14, 64) 16896
lstm_745 (LSTM) (None, 32) 12416
dropout_780 (Dropout) (None, 32) 0
dense_2004 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_781"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_782 (InputLayer) [(None, 14, 1)] 0
lstm_746 (LSTM) (None, 14, 64) 16896
lstm_747 (LSTM) (None, 32) 12416
dropout_781 (Dropout) (None, 32) 0
dense_2005 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10000 neuronas y dropout 0.8:
Model: "model_781"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_782 (InputLayer) [(None, 14, 1)] 0
lstm_746 (LSTM) (None, 14, 64) 16896
lstm_747 (LSTM) (None, 32) 12416
dropout_781 (Dropout) (None, 32) 0
dense_2005 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Para entrenar el modelo, se utiliza la función fit() en el objeto del modelo, proporcionándole como argumentos X_train y y_train. El proceso de entrenamiento se lleva a cabo a lo largo de un número específico de épocas. Además, el parámetro batch_size especifica cuántas muestras del conjunto de entrenamiento se usarán en cada iteración de retropropagación (backpropagation).
El conjunto de datos de validación se utiliza para evaluar el rendimiento del modelo al finalizar cada época. Se emplea un objeto ModelCheckpoint que monitorea la función de pérdida en el conjunto de validación y almacena el modelo correspondiente a la época en la que esta función alcanza su valor más bajo.
Se define val_loss como el valor de la función de coste para los datos de validación cruzada y loss como el valor de la función de coste para los datos de entrenamiento. period=1 indica que el modelo se evaluará y potencialmente se guardará después de cada época.
from tensorflow.keras.callbacks import ModelCheckpoint

# Template path: Keras substitutes the epoch number and validation loss
# when the checkpoint file is written.
save_weights = os.path.join(
    'keras_models',
    'PRSA_data_vola28_LSTM14_weights.{epoch:02d}-{val_loss:.4f}.keras',
)

# Checkpoint callback: evaluated once per epoch, it keeps only the full model
# (not just weights) of the epoch with the lowest validation loss.
save_best14_lstm_vola28 = ModelCheckpoint(
    save_weights,
    monitor='val_loss',
    mode='min',
    save_best_only=True,
    save_weights_only=False,
    save_freq='epoch',
    verbose=2,
)
A continuación se itera sobre cada uno de los modelos del objeto models_LSTM14_vola28.
import os
from joblib import dump, load

history_vola28_LSTM14 = []
# Train (or reload) each LSTM model in models_LSTM14_vola28, caching each
# training history on disk so re-running the cell does not retrain.
# FIX: the progress messages printed a literal placeholder instead of the
# actual file name; they now interpolate {filename}, matching the recorded
# output ("El archivo 'history_vola28_LSTM14_model_0.joblib' ya existe...").
for i, model in enumerate(models_LSTM14_vola28):
    filename = f'history_vola28_LSTM14_model_{i}.joblib'
    if os.path.exists(filename):
        # A cached history exists: load it instead of retraining.
        model_history = load(filename)
        print(f"El archivo '{filename}' ya existe. Se ha cargado el historial del entrenamiento.")
    else:
        model_history = model.fit(x=X_train_vola28_lstm_14, y=y_train_vola28_14, batch_size=16, epochs=20,
                                  verbose=2, callbacks=[save_best14_lstm_vola28],
                                  validation_data=(X_val_vola28_lstm_14, y_val_vola28_14),
                                  shuffle=True)
        # Persist only the plain history dict (the History object itself is not portable).
        dump(model_history.history, filename)
        print(f"El entrenamiento del modelo {i + 1} se ha completado y el historial ha sido guardado en '{filename}'.")
    # Normalize: loaded histories are already dicts; fresh fits return a History object.
    history_vola28_LSTM14.append(model_history if isinstance(model_history, dict) else model_history.history)
El archivo 'history_vola28_LSTM14_model_0.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola28_LSTM14_model_1.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola28_LSTM14_model_2.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola28_LSTM14_model_3.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola28_LSTM14_model_4.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola28_LSTM14_model_5.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola28_LSTM14_model_6.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola28_LSTM14_model_7.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola28_LSTM14_model_8.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola28_LSTM14_model_9.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola28_LSTM14_model_10.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola28_LSTM14_model_11.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola28_LSTM14_model_12.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola28_LSTM14_model_13.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola28_LSTM14_model_14.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola28_LSTM14_model_15.joblib' ya existe. Se ha cargado el historial del entrenamiento.
En este caso, el parámetro verbose=2 indica que se mostrará una línea por cada época durante el entrenamiento. Los valores posibles son: 0 para no mostrar información, 1 para visualizar una barra de progreso y 2 para ver un registro por época.
Además, las muestras se mezclan de manera aleatoria antes de cada época, gracias a la opción shuffle=True.
Una vez completado el entrenamiento, se realizan predicciones sobre el Precio del Bitcoin utilizando los mejores modelos guardados. Las predicciones generadas, que corresponden al Precio del Bitcoin escalado, son transformadas inversamente para recuperar los valores originales. Asimismo, se evalúa la calidad del ajuste mediante el uso de métricas de medición como SSE, MAPE, MAD, MSD y R2.
import os
import re
from tensorflow.keras.models import load_model
import numpy as np

# Scan the checkpoint directory and load the model whose file name encodes
# the lowest validation loss.
model_dir = 'keras_models'
files = os.listdir(model_dir)
# Checkpoint name layout: <prefix>.<epoch>-<val_loss>.keras (4-decimal loss).
pattern = r"PRSA_data_vola28_LSTM14_weights\.(\d+)-([\d\.]+)\.keras"

best_val_loss = float('inf')
best_model_file = None
best14_lstm_vola28 = None

# Iterate over the directory entries keeping the best-scoring checkpoint.
for file in files:
    # FIX: fullmatch instead of match so names with trailing suffixes
    # (e.g. backups like "...keras.bak") are not accepted.
    match = re.fullmatch(pattern, file)
    if match:
        # group(1) is the epoch number; only the encoded val_loss matters here.
        val_loss = float(match.group(2))
        if val_loss < best_val_loss:
            best_val_loss = val_loss
            best_model_file = file  # remember the best checkpoint so far

# Load the best model if any checkpoint matched the pattern.
if best_model_file:
    best_model_path = os.path.join(model_dir, best_model_file)
    print(f"Cargando el mejor modelo: {best_model_file} con val_loss: {best_val_loss}")
    best14_lstm_vola28 = load_model(best_model_path)
    if best14_lstm_vola28 is None:
        print("Error: No se pudo cargar el mejor modelo.")
else:
    print("No se encontraron archivos de modelos que coincidan con el patrón.")
Cargando el mejor modelo: PRSA_data_vola28_LSTM14_weights.19-0.0006.keras con val_loss: 0.0006
A continuación estimamos las predicciones del modelo LSTM para la época en donde la función de pérdida alcanza su valor más bajo.
# Predictions with the restored best checkpoint (guard clause if loading failed).
if best14_lstm_vola28 is None:
    print("No se puede realizar predicciones porque el mejor modelo no se ha cargado.")
else:
    # Predict and drop the trailing singleton dimension in one step.
    train_preds_vola28_LSTM14 = np.squeeze(best14_lstm_vola28.predict(X_train_vola28_lstm_14))
    val_preds_vola28_LSTM14 = np.squeeze(best14_lstm_vola28.predict(X_val_vola28_lstm_14))
    test_preds_vola28_LSTM14 = np.squeeze(best14_lstm_vola28.predict(X_test_vola28_lstm_14))
    # Show the scaled predictions for each split.
    print("Predicciones de Entrenamiento:", train_preds_vola28_LSTM14)
    print("Predicciones de validación:", val_preds_vola28_LSTM14)
    print("Predicciones de prueba:", test_preds_vola28_LSTM14)
155/155 [==============================] - 1s 2ms/step
1/1 [==============================] - 0s 11ms/step
1/1 [==============================] - 0s 17ms/step
Predicciones de Entrenamiento: [0.00024528 0.00024528 0.00024528 ... 0.02415646 0.0205919 0.02047076]
Predicciones de validación: [0.01977194 0.02030483 0.02037859 0.02126595 0.01980017 0.01971755
0.01999369 0.01989475 0.01684524 0.01667417 0.01703978 0.01726488
0.01605442 0.01608873]
Predicciones de prueba: [0.01605329 0.0177317 0.0189216 0.02426764 0.02540562 0.02530106
0.02536478 0.02496376 0.02771563 0.03183048 0.03209726 0.0320066
0.03164429 0.0316681 ]
# Plot the last 100 training points together with the validation/test series and their LSTM predictions.
plot_model(data_train_plot_vola28_14[-100:], data_val_plot_vola28_14, data_test_plot_vola28_14, val_preds_vola28_LSTM14, test_preds_vola28_LSTM14, "Predicciones usando Memoria a Corto y Largo Paso (LSTM)")
A continuación realizamos un análisis sobre los residuales y el rendimiento de nuestro modelo con relación a nuestro conjunto de Entrenamiento (rendimiento) y de Prueba (rendimiento y residuales).
Conjunto de datos de Entrenamiento
A continuación se realiza el análisis de los residuales para el conjunto de entrenamiento.
# Residual diagnostics (Ljung-Box autocorrelation, Jarque-Bera normality) on the training set.
# FIX: the argument names had their suffixes swapped (y_train_vola14_28 /
# train_preds_vola14_LSTM28), which are undefined; the series produced above
# are y_train_vola28_14 and train_preds_vola28_LSTM14.
ljung_box_pval_LSTM_train14_vola28, jarque_bera_pval_LSTM_train14_vola28 = diagnostic_plots(y_train_vola28_14, train_preds_vola28_LSTM14)
Ljung-Box LB Statistic: 86.370329
Ljung-Box p-value: 0.000000
Se rechaza H0: hay autocorrelación en los residuales.
Jarque-Bera p-value: 0.000000
Se rechaza H0: los residuales no siguen una distribución normal.
# Compute the fit metrics (SSE, MAPE, MAD, MSD, R2) for the training split.
metrica_vola28_LSTM_train14 = metricas(y_train_vola28_14,train_preds_vola28_LSTM14)
# Relabel the single row (index 0 -> descriptive model name).
metrica_vola28_LSTM_train14.index = metrica_vola28_LSTM_train14.index.map({0: 'LSTM Entrenamiento Volatilidada ω = 28 y τ = 14'})
# Append the residual-test p-values as extra columns, aligned to the row index.
metrica_vola28_LSTM_train14['Ljung-Box p-value'] = pd.Series([ljung_box_pval_LSTM_train14_vola28], index=metrica_vola28_LSTM_train14.index)
metrica_vola28_LSTM_train14['Jarque-Bera p-value'] = pd.Series([jarque_bera_pval_LSTM_train14_vola28], index=metrica_vola28_LSTM_train14.index)
# Bare expression: renders the metrics table in the notebook.
metrica_vola28_LSTM_train14
| SSE | MAPE | MAD | MSD | R2 | Ljung-Box p-value | Jarque-Bera p-value | |
|---|---|---|---|---|---|---|---|
| LSTM Entrenamiento Volatilidada ω = 28 y τ = 14 | 0.6415 | 3.75% | 0.0 | 0.0 | 96.49% | 1.4921e-20 | 0.0 |
Conjunto de datos de Prueba:
# Residual diagnostics (Ljung-Box autocorrelation, Jarque-Bera normality) on the test set.
ljung_box_pvalLSTM14_vola28, jarque_bera_pvalLSTM14_vola28 = evaluate_residuals(data_test_plot_vola28_14, test_preds_vola28_LSTM14)
Se rechaza H0: hay autocorrelación en los residuales.
No se rechaza H0: los residuales siguen una distribución normal.
# Compute the fit metrics for the test split and relabel the single row.
metrica_LSTM_test14_vola28 = metricas(y_test_vola28_14,test_preds_vola28_LSTM14)
metrica_LSTM_test14_vola28.index = metrica_LSTM_test14_vola28.index.map({0: 'LSTM Prueba Volatilidada ω = 28 y τ = 14'})
# Append the residual-test p-values as extra columns, aligned to the row index.
metrica_LSTM_test14_vola28['Ljung-Box p-value'] = pd.Series([ljung_box_pvalLSTM14_vola28], index=metrica_LSTM_test14_vola28.index)
metrica_LSTM_test14_vola28['Jarque-Bera p-value'] = pd.Series([jarque_bera_pvalLSTM14_vola28], index=metrica_LSTM_test14_vola28.index)
# Bare expression: renders the metrics table in the notebook.
metrica_LSTM_test14_vola28
| SSE | MAPE | MAD | MSD | R2 | Ljung-Box p-value | Jarque-Bera p-value | |
|---|---|---|---|---|---|---|---|
| LSTM Prueba Volatilidada ω = 28 y τ = 14 | 5.8923e-05 | 5.01% | 0.0 | 0.0 | 81.02% | 0.0221 | 0.8832 |
Curva Runs vs Error/Score :
# Plot the validation-loss curve of the best model across training runs.
plot_best_model_validation_loss(history_vola28_LSTM14)
Boxplot Errores Conjunto de Entrenamiento, Validación y Prueba :
# Boxplots of the residuals for the training, validation and test splits.
errores_plots(y_train_vola28_14, train_preds_vola28_LSTM14, y_val_vola28_14, val_preds_vola28_LSTM14, y_test_vola28_14, test_preds_vola28_LSTM14)
De acuerdo a los resultados obtenidos del gráfico de Run vs Error/Score y al compararlo con el gráfico boxplot de los residuales se observa lo siguiente:
Conjunto de Entrenamiento: La media de los residuales se encuentra alejada del error/score correspondiente al val_loss de la época del mejor modelo; además, muestra un sesgo considerable a la derecha y una alta variabilidad.
Conjunto de Validación: La media de los residuales se encuentra alejada del error/score correspondiente al val_loss de la época del mejor modelo.
Conjunto de Prueba: La media de los residuales se encuentra alejada del error/score correspondiente al val_loss de la época del mejor modelo.
Lo anterior es coherente con las métricas obtenidas para el modelo implementado.
Horizonte de 21 días (\(\tau=21\))#
Indexación de parámetros para cambio de dimensión de X de 2D a 3D.
# Reshape the 2D feature matrices into the 3D (samples, timesteps, features) layout LSTMs expect.
X_train_vola28_lstm_21, X_val_vola28_lstm_21, X_test_vola28_lstm_21 = change_dimension_lstm(X_train_vola28_21, X_val_vola28_21, X_test_vola28_21)
Shape of 3D arrays X: (4915, 21, 1) (21, 21, 1) (21, 21, 1)
A continuación, se lleva a cabo la búsqueda de los hiperparámetros que optimizan nuestro modelo utilizando Keras y GridSearch.
from sklearn.metrics import make_scorer, mean_absolute_error
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'  # silence TensorFlow info/warning logs

# Define the LSTM network architecture.
def create_lstm_model(optimizer, activation):
    """Build and compile a 2-layer LSTM regressor for 21-step input windows.

    FIX: ``activation`` was accepted but never used, so the grid search over
    activation functions was a no-op. It now drives both recurrent layers'
    activation; the output layer stays linear, as usual for regression.
    """
    input_layer_lstm = Input(shape=(21, 1), dtype='float32')
    lstm_layer1 = LSTM(64, activation=activation, return_sequences=True)(input_layer_lstm)
    lstm_layer2 = LSTM(32, activation=activation, return_sequences=False)(lstm_layer1)
    dropout_layer_lstm = Dropout(0.2)(lstm_layer2)
    output_layer_lstm = Dense(1, activation='linear')(dropout_layer_lstm)
    ts_model_lstm = Model(inputs=input_layer_lstm, outputs=output_layer_lstm)
    ts_model_lstm.compile(loss='mean_absolute_error', optimizer=optimizer)
    return ts_model_lstm

# Hyper-parameter grid (full candidate lists kept as reference in comments).
param_grid = {'activation': ['relu'],  # ['relu', 'tanh', 'sigmoid']
              'epochs': [20],          # [20, 50, 100, 150]
              'optimizer': ['SGD']     # ['SGD', 'RMSprop', 'Adam']
              }

# Grid-search configuration: 5-fold CV scored by mean absolute error.
# FIX: GridSearchCV maximizes the score, so MAE must be marked as a loss
# (greater_is_better=False); otherwise the WORST candidate would be selected
# once the grid has more than one combination.
scoring = make_scorer(mean_absolute_error, greater_is_better=False)
model = KerasRegressor(build_fn=create_lstm_model, epochs=10, batch_size=16, verbose=0)
grid = GridSearchCV(estimator=model, param_grid=param_grid, cv=KFold(n_splits=5, shuffle=True),
                    scoring=scoring, n_jobs=-1, verbose=2)
grid_result = grid.fit(X_train_vola28_lstm_21, y_train_vola28_21)

# Report the best combination found.
print(f"Mejor función de activación: {grid_result.best_params_['activation']}")
print(f"Mejor número de epocas: {grid_result.best_params_['epochs']}")
print(f"Mejor Longitud de paso μ (optimizer): {grid_result.best_params_['optimizer']}")
print(f"Mejor Puntuación: {grid_result.best_score_}")
Fitting 5 folds for each of 1 candidates, totalling 5 fits
[CV] END ..........activation=relu, epochs=20, optimizer=SGD; total time= 41.9s
[CV] END ..........activation=relu, epochs=20, optimizer=SGD; total time= 42.1s
[CV] END ..........activation=relu, epochs=20, optimizer=SGD; total time= 42.4s
[CV] END ..........activation=relu, epochs=20, optimizer=SGD; total time= 42.4s
[CV] END ..........activation=relu, epochs=20, optimizer=SGD; total time= 42.5s
Mejor función de activación: relu
Mejor número de epocas: 20
Mejor Longitud de paso μ (optimizer): SGD
Mejor Puntuación: 0.015903937048416174
El modelo de predicción de series temporales se basa en una red neuronal recurrente que incluye una capa de entrada que se conecta a una capa LSTM. Esta capa LSTM considera 21 pasos temporales, equivalentes al número de datos históricos. La salida de la LSTM se genera únicamente a partir del último paso temporal. Cada uno de los n pasos temporales definidos en el shape (n,) cuenta con 32 neuronas ocultas.
La cantidad de pasos temporales (timesteps) y 1 indica el número de características (features) por cada paso temporal. Se utiliza return_sequences=True cuando es necesario enviar la secuencia completa de salidas a la capa siguiente, que puede ser otra capa recurrente, para este caso otra capa LSTM.
La salida de LSTM pasa a una capa de exclusión que elimina aleatoriamente el 20%, 40%, 60% y 80% de la entrada antes de pasar a la capa de salida, que tiene una única neurona oculta con una función de activación lineal
La indexación de parámetros de la función build_models_lstm se realiza con base en lo indicado en el numeral 3 del parcial práctico.
# Architecture grid per the assignment: vary hidden-neuron count and dropout
# rate; input window length is 21 (tau = 21) and the optimizer is SGD.
input_shape21 = 21
neurons_list = [10, 100, 1000, 10000]
dropout_rates = [0.2, 0.4, 0.6, 0.8]
models_LSTM21_vola28 = build_models_lstm(input_shape21, neurons_list, dropout_rates, 'SGD')
Model: "model_783"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_784 (InputLayer) [(None, 21, 1)] 0
lstm_750 (LSTM) (None, 21, 64) 16896
lstm_751 (LSTM) (None, 32) 12416
dropout_783 (Dropout) (None, 32) 0
dense_2007 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10 neuronas y dropout 0.2:
Model: "model_783"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_784 (InputLayer) [(None, 21, 1)] 0
lstm_750 (LSTM) (None, 21, 64) 16896
lstm_751 (LSTM) (None, 32) 12416
dropout_783 (Dropout) (None, 32) 0
dense_2007 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_784"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_785 (InputLayer) [(None, 21, 1)] 0
lstm_752 (LSTM) (None, 21, 64) 16896
lstm_753 (LSTM) (None, 32) 12416
dropout_784 (Dropout) (None, 32) 0
dense_2008 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10 neuronas y dropout 0.4:
Model: "model_784"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_785 (InputLayer) [(None, 21, 1)] 0
lstm_752 (LSTM) (None, 21, 64) 16896
lstm_753 (LSTM) (None, 32) 12416
dropout_784 (Dropout) (None, 32) 0
dense_2008 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_785"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_786 (InputLayer) [(None, 21, 1)] 0
lstm_754 (LSTM) (None, 21, 64) 16896
lstm_755 (LSTM) (None, 32) 12416
dropout_785 (Dropout) (None, 32) 0
dense_2009 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10 neuronas y dropout 0.6:
Model: "model_785"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_786 (InputLayer) [(None, 21, 1)] 0
lstm_754 (LSTM) (None, 21, 64) 16896
lstm_755 (LSTM) (None, 32) 12416
dropout_785 (Dropout) (None, 32) 0
dense_2009 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_786"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_787 (InputLayer) [(None, 21, 1)] 0
lstm_756 (LSTM) (None, 21, 64) 16896
lstm_757 (LSTM) (None, 32) 12416
dropout_786 (Dropout) (None, 32) 0
dense_2010 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10 neuronas y dropout 0.8:
Model: "model_786"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_787 (InputLayer) [(None, 21, 1)] 0
lstm_756 (LSTM) (None, 21, 64) 16896
lstm_757 (LSTM) (None, 32) 12416
dropout_786 (Dropout) (None, 32) 0
dense_2010 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_787"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_788 (InputLayer) [(None, 21, 1)] 0
lstm_758 (LSTM) (None, 21, 64) 16896
lstm_759 (LSTM) (None, 32) 12416
dropout_787 (Dropout) (None, 32) 0
dense_2011 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 100 neuronas y dropout 0.2:
Model: "model_787"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_788 (InputLayer) [(None, 21, 1)] 0
lstm_758 (LSTM) (None, 21, 64) 16896
lstm_759 (LSTM) (None, 32) 12416
dropout_787 (Dropout) (None, 32) 0
dense_2011 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_788"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_789 (InputLayer) [(None, 21, 1)] 0
lstm_760 (LSTM) (None, 21, 64) 16896
lstm_761 (LSTM) (None, 32) 12416
dropout_788 (Dropout) (None, 32) 0
dense_2012 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 100 neuronas y dropout 0.4:
Model: "model_788"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_789 (InputLayer) [(None, 21, 1)] 0
lstm_760 (LSTM) (None, 21, 64) 16896
lstm_761 (LSTM) (None, 32) 12416
dropout_788 (Dropout) (None, 32) 0
dense_2012 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_789"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_790 (InputLayer) [(None, 21, 1)] 0
lstm_762 (LSTM) (None, 21, 64) 16896
lstm_763 (LSTM) (None, 32) 12416
dropout_789 (Dropout) (None, 32) 0
dense_2013 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 100 neuronas y dropout 0.6:
Model: "model_789"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_790 (InputLayer) [(None, 21, 1)] 0
lstm_762 (LSTM) (None, 21, 64) 16896
lstm_763 (LSTM) (None, 32) 12416
dropout_789 (Dropout) (None, 32) 0
dense_2013 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_790"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_791 (InputLayer) [(None, 21, 1)] 0
lstm_764 (LSTM) (None, 21, 64) 16896
lstm_765 (LSTM) (None, 32) 12416
dropout_790 (Dropout) (None, 32) 0
dense_2014 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 100 neuronas y dropout 0.8:
Model: "model_790"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_791 (InputLayer) [(None, 21, 1)] 0
lstm_764 (LSTM) (None, 21, 64) 16896
lstm_765 (LSTM) (None, 32) 12416
dropout_790 (Dropout) (None, 32) 0
dense_2014 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_791"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_792 (InputLayer) [(None, 21, 1)] 0
lstm_766 (LSTM) (None, 21, 64) 16896
lstm_767 (LSTM) (None, 32) 12416
dropout_791 (Dropout) (None, 32) 0
dense_2015 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 1000 neuronas y dropout 0.2:
Model: "model_791"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_792 (InputLayer) [(None, 21, 1)] 0
lstm_766 (LSTM) (None, 21, 64) 16896
lstm_767 (LSTM) (None, 32) 12416
dropout_791 (Dropout) (None, 32) 0
dense_2015 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_792"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_793 (InputLayer) [(None, 21, 1)] 0
lstm_768 (LSTM) (None, 21, 64) 16896
lstm_769 (LSTM) (None, 32) 12416
dropout_792 (Dropout) (None, 32) 0
dense_2016 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 1000 neuronas y dropout 0.4:
Model: "model_792"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_793 (InputLayer) [(None, 21, 1)] 0
lstm_768 (LSTM) (None, 21, 64) 16896
lstm_769 (LSTM) (None, 32) 12416
dropout_792 (Dropout) (None, 32) 0
dense_2016 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_793"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_794 (InputLayer) [(None, 21, 1)] 0
lstm_770 (LSTM) (None, 21, 64) 16896
lstm_771 (LSTM) (None, 32) 12416
dropout_793 (Dropout) (None, 32) 0
dense_2017 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 1000 neuronas y dropout 0.6:
Model: "model_793"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_794 (InputLayer) [(None, 21, 1)] 0
lstm_770 (LSTM) (None, 21, 64) 16896
lstm_771 (LSTM) (None, 32) 12416
dropout_793 (Dropout) (None, 32) 0
dense_2017 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_794"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_795 (InputLayer) [(None, 21, 1)] 0
lstm_772 (LSTM) (None, 21, 64) 16896
lstm_773 (LSTM) (None, 32) 12416
dropout_794 (Dropout) (None, 32) 0
dense_2018 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 1000 neuronas y dropout 0.8:
Model: "model_794"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_795 (InputLayer) [(None, 21, 1)] 0
lstm_772 (LSTM) (None, 21, 64) 16896
lstm_773 (LSTM) (None, 32) 12416
dropout_794 (Dropout) (None, 32) 0
dense_2018 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_795"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_796 (InputLayer) [(None, 21, 1)] 0
lstm_774 (LSTM) (None, 21, 64) 16896
lstm_775 (LSTM) (None, 32) 12416
dropout_795 (Dropout) (None, 32) 0
dense_2019 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10000 neuronas y dropout 0.2:
Model: "model_795"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_796 (InputLayer) [(None, 21, 1)] 0
lstm_774 (LSTM) (None, 21, 64) 16896
lstm_775 (LSTM) (None, 32) 12416
dropout_795 (Dropout) (None, 32) 0
dense_2019 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_796"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_797 (InputLayer) [(None, 21, 1)] 0
lstm_776 (LSTM) (None, 21, 64) 16896
lstm_777 (LSTM) (None, 32) 12416
dropout_796 (Dropout) (None, 32) 0
dense_2020 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10000 neuronas y dropout 0.4:
Model: "model_796"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_797 (InputLayer) [(None, 21, 1)] 0
lstm_776 (LSTM) (None, 21, 64) 16896
lstm_777 (LSTM) (None, 32) 12416
dropout_796 (Dropout) (None, 32) 0
dense_2020 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_797"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_798 (InputLayer) [(None, 21, 1)] 0
lstm_778 (LSTM) (None, 21, 64) 16896
lstm_779 (LSTM) (None, 32) 12416
dropout_797 (Dropout) (None, 32) 0
dense_2021 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10000 neuronas y dropout 0.6:
Model: "model_797"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_798 (InputLayer) [(None, 21, 1)] 0
lstm_778 (LSTM) (None, 21, 64) 16896
lstm_779 (LSTM) (None, 32) 12416
dropout_797 (Dropout) (None, 32) 0
dense_2021 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_798"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_799 (InputLayer) [(None, 21, 1)] 0
lstm_780 (LSTM) (None, 21, 64) 16896
lstm_781 (LSTM) (None, 32) 12416
dropout_798 (Dropout) (None, 32) 0
dense_2022 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10000 neuronas y dropout 0.8:
Model: "model_798"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_799 (InputLayer) [(None, 21, 1)] 0
lstm_780 (LSTM) (None, 21, 64) 16896
lstm_781 (LSTM) (None, 32) 12416
dropout_798 (Dropout) (None, 32) 0
dense_2022 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Para entrenar el modelo, se utiliza la función fit() en el objeto del modelo, proporcionándole como argumentos X_train y y_train. El proceso de entrenamiento se lleva a cabo a lo largo de un número específico de épocas. Además, el parámetro batch_size especifica cuántas muestras del conjunto de entrenamiento se usarán en cada iteración de retropropagación (backpropagation).
El conjunto de datos de validación se utiliza para evaluar el rendimiento del modelo al finalizar cada época. Se emplea un objeto ModelCheckpoint que monitorea la función de pérdida en el conjunto de validación y almacena el modelo correspondiente a la época en la que esta función alcanza su valor más bajo.
Se define val_loss como el valor de la función de coste para los datos de validación cruzada y loss como el valor de la función de coste para los datos de entrenamiento. period=1 indica que el modelo se evaluará y potencialmente se guardará después de cada época.
# Checkpoint callback: the filename template embeds the epoch number and the
# validation loss of each saved model.
from tensorflow.keras.callbacks import ModelCheckpoint
save_weights = os.path.join('keras_models', 'PRSA_data_vola28_LSTM21_weights.{epoch:02d}-{val_loss:.4f}.keras')
# Keep only the best full model (lowest val_loss), evaluated once per epoch.
save_best21_lstm_vola28 = ModelCheckpoint(save_weights, monitor='val_loss', verbose=2,
save_best_only=True, save_weights_only=False, mode='min', save_freq='epoch');
A continuación se itera sobre cada uno de los modelos del objeto models_LSTM21.
import os
from joblib import dump, load

history_vola28_LSTM21 = []
# Train (or reload from disk) every model in models_LSTM21_vola28, caching each
# training history in a joblib file so re-running the cell skips retraining.
for i, model in enumerate(models_LSTM21_vola28):
    filename = f'history_vola28_LSTM21_model_{i}.joblib'
    if os.path.exists(filename):
        # Cached history found: load it instead of retraining.
        model_history = load(filename)
        # Fix: the message previously did not interpolate the filename.
        print(f"El archivo '{filename}' ya existe. Se ha cargado el historial del entrenamiento.")
    else:
        model_history = model.fit(x=X_train_vola28_lstm_21, y=y_train_vola28_21, batch_size=16, epochs=20,
                                  verbose=2, callbacks=[save_best21_lstm_vola28],
                                  validation_data=(X_val_vola28_lstm_21, y_val_vola28_21),
                                  shuffle=True)
        dump(model_history.history, filename)
        print(f"El entrenamiento del modelo {i + 1} se ha completado y el historial ha sido guardado en '{filename}'.")
    # Normalize to a plain dict: loaded histories are dicts already, fresh
    # History objects expose the dict via .history.
    history_vola28_LSTM21.append(model_history if isinstance(model_history, dict) else model_history.history)
El archivo 'history_vola28_LSTM21_model_0.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola28_LSTM21_model_1.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola28_LSTM21_model_2.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola28_LSTM21_model_3.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola28_LSTM21_model_4.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola28_LSTM21_model_5.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola28_LSTM21_model_6.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola28_LSTM21_model_7.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola28_LSTM21_model_8.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola28_LSTM21_model_9.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola28_LSTM21_model_10.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola28_LSTM21_model_11.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola28_LSTM21_model_12.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola28_LSTM21_model_13.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola28_LSTM21_model_14.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola28_LSTM21_model_15.joblib' ya existe. Se ha cargado el historial del entrenamiento.
En este caso, el parámetro verbose=2 indica que se mostrará una línea por cada época durante el entrenamiento. Los valores posibles son: 0 para no mostrar información, 1 para visualizar una barra de progreso y 2 para ver un registro por época.
Además, las muestras se mezclan de manera aleatoria antes de cada época, gracias a la opción shuffle=True.
Una vez completado el entrenamiento, se realizan predicciones sobre el Precio del Bitcoin utilizando los mejores modelos guardados. Las predicciones generadas, que corresponden al Precio del Bitcoin escalado, son transformadas inversamente para recuperar los valores originales. Asimismo, se evalúa la calidad del ajuste mediante el uso de métricas de medición como SSE, MAPE, MAD, MSD y R².
import os
import re
from tensorflow.keras.models import load_model
import numpy as np

# Select the checkpoint file whose name encodes the lowest validation loss.
model_dir = 'keras_models'
files = os.listdir(model_dir)
pattern = r"PRSA_data_vola28_LSTM21_weights\.(\d+)-([\d\.]+)\.keras"

best_val_loss = float('inf')
best_model_file = None
best21_lstm_vola28 = None

for fname in files:
    m = re.match(pattern, fname)
    if not m:
        continue
    epoch = int(m.group(1))            # epoch number encoded in the filename
    candidate_loss = float(m.group(2)) # val_loss encoded in the filename
    if candidate_loss < best_val_loss:
        best_val_loss = candidate_loss
        best_model_file = fname

if best_model_file:
    best_model_path = os.path.join(model_dir, best_model_file)
    print(f"Cargando el mejor modelo: {best_model_file} con val_loss: {best_val_loss}")
    best21_lstm_vola28 = load_model(best_model_path)
    if best21_lstm_vola28 is None:
        print("Error: No se pudo cargar el mejor modelo.")
else:
    print("No se encontraron archivos de modelos que coincidan con el patrón.")
Cargando el mejor modelo: PRSA_data_vola28_LSTM21_weights.18-0.0008.keras con val_loss: 0.0008
A continuación estimamos las predicciones del modelo LSTM para la época en donde la función de pérdida alcanza su valor más bajo.
# Predictions using the best checkpointed model (if one was loaded).
if best21_lstm_vola28 is not None:
    train_preds_vola28_LSTM21 = best21_lstm_vola28.predict(X_train_vola28_lstm_21)
    val_preds_vola28_LSTM21 = best21_lstm_vola28.predict(X_val_vola28_lstm_21)
    test_preds_vola28_LSTM21 = best21_lstm_vola28.predict(X_test_vola28_lstm_21)
    # Flatten the (n, 1) network outputs into 1-D vectors.
    train_preds_vola28_LSTM21 = np.squeeze(train_preds_vola28_LSTM21)
    val_preds_vola28_LSTM21 = np.squeeze(val_preds_vola28_LSTM21)
    test_preds_vola28_LSTM21 = np.squeeze(test_preds_vola28_LSTM21)
    # Print the predictions.
    # Fix: this line referenced the undefined name train_preds_vola21_LSTM21.
    print("Predicciones de Entrenamiento:", train_preds_vola28_LSTM21)
    print("Predicciones de validación:", val_preds_vola28_LSTM21)
    print("Predicciones de prueba:", test_preds_vola28_LSTM21)
else:
    print("No se puede realizar predicciones porque el mejor modelo no se ha cargado.")
154/154 [==============================] - 1s 3ms/step
1/1 [==============================] - 0s 23ms/step
1/1 [==============================] - 0s 15ms/step
Predicciones de Entrenamiento: [0.00065348 0.00065348 0.00065348 ... 0.03063774 0.03060195 0.03058668]
Predicciones de validación: [0.02774444 0.02896487 0.02881305 0.02845261 0.02822987 0.0295639
0.0296765 0.0296645 0.02896145 0.02874506 0.02739654 0.02676729
0.02675047 0.02676198 0.02682831 0.0232301 0.02293766 0.02357914
0.02399966 0.02043307 0.02028001]
Predicciones de prueba: [0.01952437 0.02009363 0.02017197 0.02107996 0.01960839 0.01952406
0.01977411 0.01969218 0.01663092 0.01643156 0.01675948 0.01702081
0.01581642 0.01583938 0.01578473 0.01748742 0.01870063 0.02411165
0.02530042 0.02525723 0.0252745 ]
# Fix: the predictions computed above are named *_vola28_LSTM21; the original
# call referenced the undefined names val_preds_vola21_LSTM28 / test_preds_vola21_LSTM28.
plot_model(data_train_plot_vola28[-100:], data_val_plot_vola28, data_test_plot_vola28, val_preds_vola28_LSTM21, test_preds_vola28_LSTM21, "Predicciones usando Memoria a Corto y Largo Paso (LSTM)")
A continuación realizamos un análisis sobre los residuales y el rendimiento de nuestro modelo con relación a nuestro conjunto de entrenamiento (rendimiento) y de prueba (rendimiento y residuales).
Conjunto de datos de Entrenamiento
A continuación se realiza el análisis de los residuales para el conjunto de entrenamiento.
# Residual diagnostics on the training fit: Ljung-Box (independence) and
# Jarque-Bera (normality) p-values, per the printed output below.
ljung_box_pval_LSTM_train21_vola28, jarque_bera_pval_LSTM_train21_vola28 = diagnostic_plots(y_train_vola28_21, train_preds_vola28_LSTM21)
Ljung-Box LB Statistic: 0.079913
Ljung-Box p-value: 0.777415
No se rechaza H0: los residuales son independientes (no correlacionados).
Jarque-Bera p-value: 0.000000
Se rechaza H0: los residuales no siguen una distribución normal.
# Compute the training-set fit metrics and append the residual-test p-values.
metrica_vola28_LSTM_train21 = metricas(y_train_vola28_21, train_preds_vola28_LSTM21)
row_label = 'LSTM Entrenamiento Volatilidad ω = 28 y τ = 21'
metrica_vola28_LSTM_train21.index = metrica_vola28_LSTM_train21.index.map({0: row_label})
for col_name, p_value in [('Ljung-Box p-value', ljung_box_pval_LSTM_train21_vola28),
                          ('Jarque-Bera p-value', jarque_bera_pval_LSTM_train21_vola28)]:
    metrica_vola28_LSTM_train21[col_name] = pd.Series([p_value], index=metrica_vola28_LSTM_train21.index)
metrica_vola28_LSTM_train21
| SSE | MAPE | MAD | MSD | R2 | Ljung-Box p-value | Jarque-Bera p-value | |
|---|---|---|---|---|---|---|---|
| LSTM Entrenamiento Volatilidad ω = 28 y τ = 21 | 0.6406 | 3.8% | 0.0 | 0.0 | 96.49% | 0.7774 | 0.0 |
Conjunto de datos de Prueba:
# Residual diagnostics on the test-set predictions (autocorrelation and normality).
ljung_box_pvalLSTM21_vola28, jarque_bera_pvalLSTM21_vola28 = evaluate_residuals(data_test_plot_vola28_21, test_preds_vola28_LSTM21)
Se rechaza H0: hay autocorrelación en los residuales.
No se rechaza H0: los residuales siguen una distribución normal.
# Compute the test-set fit metrics and append the residual-test p-values.
metrica_LSTM_test21_vola28 = metricas(y_test_vola28_21, test_preds_vola28_LSTM21)
test_label = 'LSTM Prueba Volatilidad ω = 28 y τ = 21'
metrica_LSTM_test21_vola28.index = metrica_LSTM_test21_vola28.index.map({0: test_label})
for col_name, p_value in [('Ljung-Box p-value', ljung_box_pvalLSTM21_vola28),
                          ('Jarque-Bera p-value', jarque_bera_pvalLSTM21_vola28)]:
    metrica_LSTM_test21_vola28[col_name] = pd.Series([p_value], index=metrica_LSTM_test21_vola28.index)
metrica_LSTM_test21_vola28
| SSE | MAPE | MAD | MSD | R2 | Ljung-Box p-value | Jarque-Bera p-value | |
|---|---|---|---|---|---|---|---|
| LSTM Prueba Volatilidad ω = 28 y τ = 21 | 5.3424e-05 | 4.83% | 0.0 | 0.0 | 75.52% | 5.4375e-05 | 0.5799 |
Horizonte de 28 días (\(\tau=28\))#
Indexación de parámetros para cambio de dimensión de X de 2D a 3D.
# Reshape the 2-D feature matrices into the 3-D (samples, timesteps=28, features=1)
# layout required by the LSTM layers (shapes confirmed by the printed output below).
X_train_vola28_lstm_28, X_val_vola28_lstm_28, X_test_vola28_lstm_28 = change_dimension_lstm(X_train_vola28_28, X_val_vola28_28, X_test_vola28_28)
Shape of 3D arrays X: (4887, 28, 1) (28, 28, 1) (28, 28, 1)
A continuación, se lleva a cabo la búsqueda de los hiperparámetros que optimizan nuestro modelo utilizando Keras y GridSearch.
from sklearn.metrics import make_scorer, mean_absolute_error
# Suppress TensorFlow info/warning log messages.
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # Esto ignorará todos los logs de info y warnings.
# Define the LSTM network architecture
def create_lstm_model(optimizer, activation):
    """Build and compile a two-layer LSTM regressor for 28-step input windows.

    Args:
        optimizer: Keras optimizer name or instance used to compile the model.
        activation: activation function for the LSTM layers. Fix: previously
            this argument was accepted but never used, so the grid search over
            activations had no effect on the model.

    Returns:
        A compiled Keras Model with mean-absolute-error loss.
    """
    input_layer_lstm = Input(shape=(28, 1), dtype='float32')
    # Apply the searched activation to both recurrent layers.
    lstm_layer1 = LSTM(64, activation=activation, return_sequences=True)(input_layer_lstm)
    lstm_layer2 = LSTM(32, activation=activation, return_sequences=False)(lstm_layer1)
    dropout_layer_lstm = Dropout(0.2)(lstm_layer2)
    # Linear output head: unconstrained real-valued regression target.
    output_layer_lstm = Dense(1, activation='linear')(dropout_layer_lstm)
    ts_model_lstm = Model(inputs=input_layer_lstm, outputs=output_layer_lstm)
    ts_model_lstm.compile(loss='mean_absolute_error', optimizer=optimizer)
    return ts_model_lstm
# definición de parametros
param_grid = {'activation': ['relu'], # Funciones de activación a probar 'activation': ['relu', 'tanh', 'sigmoid']
'epochs' : [20], #[20, 50, 100, 150]
'optimizer': ['SGD'] #['SGD', 'RMSprop', 'Adam']
}
# Configuración el Grid Search
scoring = make_scorer(mean_absolute_error)
model = KerasRegressor(build_fn=create_lstm_model, epochs=10, batch_size=16, verbose=0)
grid = GridSearchCV(estimator=model, param_grid=param_grid,cv=KFold(n_splits=5, shuffle=True), scoring=scoring, n_jobs = -1,verbose =2)
grid_result = grid.fit(X_train_vola28_lstm_28, y_train_vola28_28)
# Resultados del Grid Search
print(f"Mejor función de activación: {grid_result.best_params_['activation']}")
print(f"Mejor número de epocas: {grid_result.best_params_['epochs']}")
print(f"Mejor Longitud de paso μ (optimizer): {grid_result.best_params_['optimizer']}")
print(f"Mejor Puntuación: {grid_result.best_score_}")
Fitting 5 folds for each of 1 candidates, totalling 5 fits
[CV] END ..........activation=relu, epochs=20, optimizer=SGD; total time= 50.0s
[CV] END ..........activation=relu, epochs=20, optimizer=SGD; total time= 50.2s
[CV] END ..........activation=relu, epochs=20, optimizer=SGD; total time= 50.5s
[CV] END ..........activation=relu, epochs=20, optimizer=SGD; total time= 50.8s
[CV] END ..........activation=relu, epochs=20, optimizer=SGD; total time= 51.2s
Mejor función de activación: relu
Mejor número de epocas: 20
Mejor Longitud de paso μ (optimizer): SGD
Mejor Puntuación: 0.016788452156996835
El modelo de predicción de series temporales se basa en una red neuronal recurrente que incluye una capa de entrada que se conecta a una capa LSTM. Esta capa LSTM considera 28 pasos temporales, equivalentes al número de datos históricos. La salida final de la parte recurrente se genera únicamente a partir del último paso temporal. La primera capa LSTM cuenta con 64 neuronas ocultas y la segunda con 32.
La cantidad de pasos temporales (timesteps) y 1 indica el número de características (features) por cada paso temporal. Se utiliza return_sequences=True cuando es necesario enviar la secuencia completa de salidas a la capa siguiente, que puede ser otra capa recurrente, para este caso otra capa LSTM.
La salida de LSTM pasa a una capa de exclusión que elimina aleatoriamente el 20%, 40%, 60% y 80% de la entrada antes de pasar a la capa de salida, que tiene una única neurona oculta con una función de activación lineal
Indexación de parámetros de la función build_models_lstm con base en lo indicado en el numeral 3 del parcial práctico.
# Hyper-parameter combinations required by item 3 of the practical exam:
# every hidden-neuron count crossed with every dropout rate.
input_shape28 = 28                       # timesteps per input window
dropout_rates = [0.2, 0.4, 0.6, 0.8]     # exclusion rates to try
neurons_list = [10, 100, 1000, 10000]    # hidden-neuron counts to try
models_LSTM28_vola28 = build_models_lstm(input_shape28, neurons_list, dropout_rates, 'SGD')
Model: "model_800"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_801 (InputLayer) [(None, 28, 1)] 0
lstm_784 (LSTM) (None, 28, 64) 16896
lstm_785 (LSTM) (None, 32) 12416
dropout_800 (Dropout) (None, 32) 0
dense_2024 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10 neuronas y dropout 0.2:
Model: "model_800"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_801 (InputLayer) [(None, 28, 1)] 0
lstm_784 (LSTM) (None, 28, 64) 16896
lstm_785 (LSTM) (None, 32) 12416
dropout_800 (Dropout) (None, 32) 0
dense_2024 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_801"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_802 (InputLayer) [(None, 28, 1)] 0
lstm_786 (LSTM) (None, 28, 64) 16896
lstm_787 (LSTM) (None, 32) 12416
dropout_801 (Dropout) (None, 32) 0
dense_2025 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10 neuronas y dropout 0.4:
Model: "model_801"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_802 (InputLayer) [(None, 28, 1)] 0
lstm_786 (LSTM) (None, 28, 64) 16896
lstm_787 (LSTM) (None, 32) 12416
dropout_801 (Dropout) (None, 32) 0
dense_2025 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_802"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_803 (InputLayer) [(None, 28, 1)] 0
lstm_788 (LSTM) (None, 28, 64) 16896
lstm_789 (LSTM) (None, 32) 12416
dropout_802 (Dropout) (None, 32) 0
dense_2026 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10 neuronas y dropout 0.6:
Model: "model_802"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_803 (InputLayer) [(None, 28, 1)] 0
lstm_788 (LSTM) (None, 28, 64) 16896
lstm_789 (LSTM) (None, 32) 12416
dropout_802 (Dropout) (None, 32) 0
dense_2026 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_803"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_804 (InputLayer) [(None, 28, 1)] 0
lstm_790 (LSTM) (None, 28, 64) 16896
lstm_791 (LSTM) (None, 32) 12416
dropout_803 (Dropout) (None, 32) 0
dense_2027 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10 neuronas y dropout 0.8:
Model: "model_803"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_804 (InputLayer) [(None, 28, 1)] 0
lstm_790 (LSTM) (None, 28, 64) 16896
lstm_791 (LSTM) (None, 32) 12416
dropout_803 (Dropout) (None, 32) 0
dense_2027 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_804"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_805 (InputLayer) [(None, 28, 1)] 0
lstm_792 (LSTM) (None, 28, 64) 16896
lstm_793 (LSTM) (None, 32) 12416
dropout_804 (Dropout) (None, 32) 0
dense_2028 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 100 neuronas y dropout 0.2:
Model: "model_804"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_805 (InputLayer) [(None, 28, 1)] 0
lstm_792 (LSTM) (None, 28, 64) 16896
lstm_793 (LSTM) (None, 32) 12416
dropout_804 (Dropout) (None, 32) 0
dense_2028 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_805"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_806 (InputLayer) [(None, 28, 1)] 0
lstm_794 (LSTM) (None, 28, 64) 16896
lstm_795 (LSTM) (None, 32) 12416
dropout_805 (Dropout) (None, 32) 0
dense_2029 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 100 neuronas y dropout 0.4:
Model: "model_805"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_806 (InputLayer) [(None, 28, 1)] 0
lstm_794 (LSTM) (None, 28, 64) 16896
lstm_795 (LSTM) (None, 32) 12416
dropout_805 (Dropout) (None, 32) 0
dense_2029 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_806"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_807 (InputLayer) [(None, 28, 1)] 0
lstm_796 (LSTM) (None, 28, 64) 16896
lstm_797 (LSTM) (None, 32) 12416
dropout_806 (Dropout) (None, 32) 0
dense_2030 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 100 neuronas y dropout 0.6:
Model: "model_806"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_807 (InputLayer) [(None, 28, 1)] 0
lstm_796 (LSTM) (None, 28, 64) 16896
lstm_797 (LSTM) (None, 32) 12416
dropout_806 (Dropout) (None, 32) 0
dense_2030 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_807"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_808 (InputLayer) [(None, 28, 1)] 0
lstm_798 (LSTM) (None, 28, 64) 16896
lstm_799 (LSTM) (None, 32) 12416
dropout_807 (Dropout) (None, 32) 0
dense_2031 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 100 neuronas y dropout 0.8:
Model: "model_807"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_808 (InputLayer) [(None, 28, 1)] 0
lstm_798 (LSTM) (None, 28, 64) 16896
lstm_799 (LSTM) (None, 32) 12416
dropout_807 (Dropout) (None, 32) 0
dense_2031 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_808"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_809 (InputLayer) [(None, 28, 1)] 0
lstm_800 (LSTM) (None, 28, 64) 16896
lstm_801 (LSTM) (None, 32) 12416
dropout_808 (Dropout) (None, 32) 0
dense_2032 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 1000 neuronas y dropout 0.2:
Model: "model_808"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_809 (InputLayer) [(None, 28, 1)] 0
lstm_800 (LSTM) (None, 28, 64) 16896
lstm_801 (LSTM) (None, 32) 12416
dropout_808 (Dropout) (None, 32) 0
dense_2032 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_809"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_810 (InputLayer) [(None, 28, 1)] 0
lstm_802 (LSTM) (None, 28, 64) 16896
lstm_803 (LSTM) (None, 32) 12416
dropout_809 (Dropout) (None, 32) 0
dense_2033 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 1000 neuronas y dropout 0.4:
Model: "model_809"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_810 (InputLayer) [(None, 28, 1)] 0
lstm_802 (LSTM) (None, 28, 64) 16896
lstm_803 (LSTM) (None, 32) 12416
dropout_809 (Dropout) (None, 32) 0
dense_2033 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_810"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_811 (InputLayer) [(None, 28, 1)] 0
lstm_804 (LSTM) (None, 28, 64) 16896
lstm_805 (LSTM) (None, 32) 12416
dropout_810 (Dropout) (None, 32) 0
dense_2034 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 1000 neuronas y dropout 0.6:
Model: "model_810"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_811 (InputLayer) [(None, 28, 1)] 0
lstm_804 (LSTM) (None, 28, 64) 16896
lstm_805 (LSTM) (None, 32) 12416
dropout_810 (Dropout) (None, 32) 0
dense_2034 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_811"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_812 (InputLayer) [(None, 28, 1)] 0
lstm_806 (LSTM) (None, 28, 64) 16896
lstm_807 (LSTM) (None, 32) 12416
dropout_811 (Dropout) (None, 32) 0
dense_2035 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 1000 neuronas y dropout 0.8:
Model: "model_811"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_812 (InputLayer) [(None, 28, 1)] 0
lstm_806 (LSTM) (None, 28, 64) 16896
lstm_807 (LSTM) (None, 32) 12416
dropout_811 (Dropout) (None, 32) 0
dense_2035 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_812"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_813 (InputLayer) [(None, 28, 1)] 0
lstm_808 (LSTM) (None, 28, 64) 16896
lstm_809 (LSTM) (None, 32) 12416
dropout_812 (Dropout) (None, 32) 0
dense_2036 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10000 neuronas y dropout 0.2:
Model: "model_812"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_813 (InputLayer) [(None, 28, 1)] 0
lstm_808 (LSTM) (None, 28, 64) 16896
lstm_809 (LSTM) (None, 32) 12416
dropout_812 (Dropout) (None, 32) 0
dense_2036 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_813"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_814 (InputLayer) [(None, 28, 1)] 0
lstm_810 (LSTM) (None, 28, 64) 16896
lstm_811 (LSTM) (None, 32) 12416
dropout_813 (Dropout) (None, 32) 0
dense_2037 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10000 neuronas y dropout 0.4:
Model: "model_813"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_814 (InputLayer) [(None, 28, 1)] 0
lstm_810 (LSTM) (None, 28, 64) 16896
lstm_811 (LSTM) (None, 32) 12416
dropout_813 (Dropout) (None, 32) 0
dense_2037 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_814"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_815 (InputLayer) [(None, 28, 1)] 0
lstm_812 (LSTM) (None, 28, 64) 16896
lstm_813 (LSTM) (None, 32) 12416
dropout_814 (Dropout) (None, 32) 0
dense_2038 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10000 neuronas y dropout 0.6:
Model: "model_814"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_815 (InputLayer) [(None, 28, 1)] 0
lstm_812 (LSTM) (None, 28, 64) 16896
lstm_813 (LSTM) (None, 32) 12416
dropout_814 (Dropout) (None, 32) 0
dense_2038 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Model: "model_815"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_816 (InputLayer) [(None, 28, 1)] 0
lstm_814 (LSTM) (None, 28, 64) 16896
lstm_815 (LSTM) (None, 32) 12416
dropout_815 (Dropout) (None, 32) 0
dense_2039 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
Modelo con 10000 neuronas y dropout 0.8:
Model: "model_815"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_816 (InputLayer) [(None, 28, 1)] 0
lstm_814 (LSTM) (None, 28, 64) 16896
lstm_815 (LSTM) (None, 32) 12416
dropout_815 (Dropout) (None, 32) 0
dense_2039 (Dense) (None, 1) 33
=================================================================
Total params: 29,345
Trainable params: 29,345
Non-trainable params: 0
_________________________________________________________________
----------------------------------------
Para entrenar el modelo, se utiliza la función fit() en el objeto del modelo, proporcionándole como argumentos X_train y y_train. El proceso de entrenamiento se lleva a cabo a lo largo de un número específico de épocas. Además, el parámetro batch_size especifica cuántas muestras del conjunto de entrenamiento se usarán en cada iteración de retropropagación (backpropagation).
El conjunto de datos de validación se utiliza para evaluar el rendimiento del modelo al finalizar cada época. Se emplea un objeto ModelCheckpoint que monitorea la función de pérdida en el conjunto de validación y almacena el modelo correspondiente a la época en la que esta función alcanza su valor más bajo.
Se define val_loss como el valor de la función de coste para los datos de validación cruzada y loss como el valor de la función de coste para los datos de entrenamiento. period=1 indica que el modelo se evaluará y potencialmente se guardará después de cada época.
# Checkpoint callback: evaluated after every epoch, it keeps on disk only
# the model whose validation loss is the lowest seen so far.
from tensorflow.keras.callbacks import ModelCheckpoint

save_weights = os.path.join('keras_models', 'PRSA_data_vola28_LSTM28_weights.{epoch:02d}-{val_loss:.4f}.keras')
save_best28_lstm_vola28 = ModelCheckpoint(
    save_weights,
    monitor='val_loss',
    mode='min',
    save_best_only=True,
    save_weights_only=False,
    save_freq='epoch',
    verbose=2,
)
A continuación se itera sobre cada uno de los modelos del objeto models_LSTM28_vola28.
import os
from joblib import dump, load

history_vola28_LSTM28 = []
# Train (or reload from cache) each candidate LSTM, collecting its history.
for i, model in enumerate(models_LSTM28_vola28):
    filename = f'history_vola28_LSTM28_model_{i}.joblib'
    if os.path.exists(filename):
        # A cached history means this model was already trained: skip the fit.
        model_history = load(filename)
        # BUG FIX: the message previously printed the literal '(unknown)'
        # instead of the actual file name (see the recorded output below).
        print(f"El archivo '{filename}' ya existe. Se ha cargado el historial del entrenamiento.")
    else:
        model_history = model.fit(x=X_train_vola28_lstm_28, y=y_train_vola28_28, batch_size=16, epochs=20,
                                  verbose=2, callbacks=[save_best28_lstm_vola28], validation_data=(X_val_vola28_lstm_28, y_val_vola28_28),
                                  shuffle=True)
        # Persist only the history dict so the next run can skip training.
        dump(model_history.history, filename)
        print(f"El entrenamiento del modelo {i + 1} se ha completado y el historial ha sido guardado en '{filename}'.")
    # Normalize: loaded histories are already dicts, fresh fits carry a
    # History object whose .history holds the per-epoch metrics.
    history_vola28_LSTM28.append(model_history if isinstance(model_history, dict) else model_history.history)
El archivo 'history_vola28_LSTM28_model_0.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola28_LSTM28_model_1.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola28_LSTM28_model_2.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola28_LSTM28_model_3.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola28_LSTM28_model_4.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola28_LSTM28_model_5.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola28_LSTM28_model_6.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola28_LSTM28_model_7.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola28_LSTM28_model_8.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola28_LSTM28_model_9.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola28_LSTM28_model_10.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola28_LSTM28_model_11.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola28_LSTM28_model_12.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola28_LSTM28_model_13.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola28_LSTM28_model_14.joblib' ya existe. Se ha cargado el historial del entrenamiento.
El archivo 'history_vola28_LSTM28_model_15.joblib' ya existe. Se ha cargado el historial del entrenamiento.
En este caso, el parámetro verbose=2 indica que se mostrará una línea por cada época durante el entrenamiento. Los valores posibles son: 0 para no mostrar información, 1 para visualizar una barra de progreso y 2 para ver un registro por época.
Además, las muestras se mezclan de manera aleatoria antes de cada época, gracias a la opción shuffle=True.
Una vez completado el entrenamiento, se realizan predicciones sobre el Precio del Bitcoin utilizando los mejores modelos guardados. Las predicciones generadas, que corresponden al Precio del Bitcoin escalado, son transformadas inversamente para recuperar los valores originales. Asimismo, se evalúa la calidad del ajuste mediante el uso de métricas de medición como SSE, MAPE, MAD, MSD y R2.
import os
import re
from tensorflow.keras.models import load_model
import numpy as np

# Directory holding the checkpointed models.
model_dir = 'keras_models'
files = os.listdir(model_dir)
# File-name pattern produced by ModelCheckpoint: group(1) is the epoch,
# group(2) the validation loss encoded in the name.
pattern = r"PRSA_data_vola28_LSTM28_weights\.(\d+)-([\d\.]+)\.keras"

best_val_loss = float('inf')
best_model_file = None
best28_lstm_vola28 = None

# Scan every checkpoint and remember the one with the lowest val_loss.
for file in files:
    match = re.match(pattern, file)
    if match:
        # Parse the validation loss from the file name.
        # (The epoch in group(1) was extracted before but never used.)
        val_loss = float(match.group(2))
        if val_loss < best_val_loss:
            best_val_loss = val_loss
            best_model_file = file  # keep the best checkpoint so far

# Load the best checkpoint, if any file matched the pattern.
if best_model_file:
    best_model_path = os.path.join(model_dir, best_model_file)
    print(f"Cargando el mejor modelo: {best_model_file} con val_loss: {best_val_loss}")
    best28_lstm_vola28 = load_model(best_model_path)
    if best28_lstm_vola28 is None:
        print("Error: No se pudo cargar el mejor modelo.")
else:
    print("No se encontraron archivos de modelos que coincidan con el patrón.")
Cargando el mejor modelo: PRSA_data_vola28_LSTM28_weights.12-0.0006.keras con val_loss: 0.0006
A continuación estimamos las predicciones del modelo LSTM para la época en donde la función de pérdida alcanza su valor más bajo.
# Generate predictions with the best checkpointed LSTM, if it was loaded.
if best28_lstm_vola28 is None:
    print("No se puede realizar predicciones porque el mejor modelo no se ha cargado.")
else:
    # Predict on each split, then drop the trailing singleton dimension so
    # the arrays are 1-D and directly comparable with the target vectors.
    train_preds_vola28_LSTM28 = np.squeeze(best28_lstm_vola28.predict(X_train_vola28_lstm_28))
    val_preds_vola28_LSTM28 = np.squeeze(best28_lstm_vola28.predict(X_val_vola28_lstm_28))
    test_preds_vola28_LSTM28 = np.squeeze(best28_lstm_vola28.predict(X_test_vola28_lstm_28))
    # Echo the (scaled) predictions for a quick sanity check.
    print("Predicciones de Entrenamiento:", train_preds_vola28_LSTM28)
    print("Predicciones de validación:", val_preds_vola28_LSTM28)
    print("Predicciones de prueba:", test_preds_vola28_LSTM28)
153/153 [==============================] - 1s 3ms/step
1/1 [==============================] - 0s 13ms/step
1/1 [==============================] - 0s 11ms/step
Predicciones de Entrenamiento: [0.00069685 0.00069685 0.00069685 ... 0.0246336 0.02452765 0.02430882]
Predicciones de validación: [0.02420108 0.02385474 0.02230763 0.02366592 0.02443529 0.02425671
0.02415307 0.02413859 0.02464491 0.02504216 0.02451654 0.02449027
0.0278269 0.02824472 0.02836224 0.02797896 0.02796812 0.02736126
0.02792033 0.02805061 0.02804621 0.02789402 0.02899709 0.02893455
0.02859725 0.02836395 0.02956758 0.02975667]
Predicciones de prueba: [0.02975679 0.02910678 0.02886033 0.02760977 0.02694902 0.02689082
0.02690426 0.02696997 0.02368624 0.02318505 0.02374233 0.02418448
0.02096416 0.02059118 0.0198798 0.02036564 0.0204818 0.02132544
0.02004402 0.01987434 0.02009164 0.02004114 0.01724802 0.01686302
0.01714042 0.01741496 0.0163423 0.01628453]
plot_model(data_train_plot_vola28_28[-100:], data_val_plot_vola28_28, data_test_plot_vola28_28, val_preds_vola28_LSTM28, test_preds_vola28_LSTM28, "Predicciones usando Memoria a Corto y Largo Paso (LSTM)")
A continuación realizamos un análisis sobre los residuales y rendimiento de nuestro modelo con relación a nuestro conjunto de Entrenamiento(rendimiento) y test (rendimiento y residuales).
Conjunto de datos de Entrenamiento
A continuación se realiza el análisis de los residuales para el conjunto de entrenamiento.
ljung_box_pval_LSTM_train28_vola28, jarque_bera_pval_LSTM_train28_vola28 = diagnostic_plots(y_train_vola28_28, train_preds_vola28_LSTM28)
Ljung-Box LB Statistic: 0.627563
Ljung-Box p-value: 0.428251
No se rechaza H0: los residuales son independientes (no correlacionados).
Jarque-Bera p-value: 0.000000
Se rechaza H0: los residuales no siguen una distribución normal.
# Compute the error metrics (SSE, MAPE, MAD, MSD, R2) on the training set.
metrica_vola28_LSTM_train28 = metricas(y_train_vola28_28,train_preds_vola28_LSTM28)
# Relabel the single row (index 0) so the model/configuration is identifiable
# once this frame is concatenated into the summary tables below.
metrica_vola28_LSTM_train28.index = metrica_vola28_LSTM_train28.index.map({0: 'LSTM Entrenamiento Volatilidad ω = 28 y τ = 28'})
# Attach the residual-diagnostic p-values as additional columns.
metrica_vola28_LSTM_train28['Ljung-Box p-value'] = pd.Series([ljung_box_pval_LSTM_train28_vola28], index=metrica_vola28_LSTM_train28.index)
metrica_vola28_LSTM_train28['Jarque-Bera p-value'] = pd.Series([jarque_bera_pval_LSTM_train28_vola28], index=metrica_vola28_LSTM_train28.index)
metrica_vola28_LSTM_train28
| SSE | MAPE | MAD | MSD | R2 | Ljung-Box p-value | Jarque-Bera p-value | |
|---|---|---|---|---|---|---|---|
| LSTM Entrenamiento Volatilidad ω = 28 y τ = 28 | 0.6377 | 4.16% | 0.0 | 0.0 | 96.5% | 0.4283 | 0.0 |
Conjunto de datos de Prueba:
# Residual diagnostics on the TEST set for the LSTM ω = 28, τ = 28 model
# (Ljung-Box and Jarque-Bera p-values).
ljung_box_pvalLSTM28_vola28, jarque_bera_pvalLSTM28_vola28 = evaluate_residuals(data_test_plot_vola28_28, test_preds_vola28_LSTM28)
Se rechaza H0: hay autocorrelación en los residuales.
No se rechaza H0: los residuales siguen una distribución normal.
# Compute the error metrics (SSE, MAPE, MAD, MSD, R2) on the test set.
metrica_LSTM_test28_vola28 = metricas(y_test_vola28_28,test_preds_vola28_LSTM28)
# FIX: this cell evaluates the ω = 28 configuration (all variables are the
# *_vola28_28 series), but the row label previously said 'ω = 21'.
# Corrected to ω = 28 so the summary tables report the right model.
metrica_LSTM_test28_vola28.index = metrica_LSTM_test28_vola28.index.map({0: 'LSTM Prueba Volatilidad ω = 28 y τ = 28'})
# Attach the residual-diagnostic p-values as additional columns.
metrica_LSTM_test28_vola28['Ljung-Box p-value'] = pd.Series([ljung_box_pvalLSTM28_vola28], index=metrica_LSTM_test28_vola28.index)
metrica_LSTM_test28_vola28['Jarque-Bera p-value'] = pd.Series([jarque_bera_pvalLSTM28_vola28], index=metrica_LSTM_test28_vola28.index)
metrica_LSTM_test28_vola28
| SSE | MAPE | MAD | MSD | R2 | Ljung-Box p-value | Jarque-Bera p-value | |
|---|---|---|---|---|---|---|---|
| LSTM Prueba Volatilidad ω = 28 y τ = 28 | 5.0535e-05 | 4.11% | 0.0 | 0.0 | 89.38% | 1.7241e-06 | 0.7083 |
Curva Runs vs Error/Score :
# Plot the validation-loss curve across training runs/epochs from the Keras
# history of the LSTM ω = 28, τ = 28 model.
plot_best_model_validation_loss(history_vola28_LSTM28)
Boxplot Errores Conjunto de Entrenamiento, Validación y Prueba :
# Boxplots of the residuals for the training, validation, and test splits of
# the LSTM ω = 28, τ = 28 model.
errores_plots(y_train_vola28_28, train_preds_vola28_LSTM28, y_val_vola28_28, val_preds_vola28_LSTM28, y_test_vola28_28, test_preds_vola28_LSTM28)
De acuerdo a los resultados obtenidos del gráfico de Run vs Error/Score y al compararlo con el gráfico boxplot de los residuales se observa lo siguiente:
Conjunto de Entrenamiento: La media de los residuales se encuentra considerablemente cerca del error/score correspondiente al val_loss de la época del mejor modelo.
Conjunto de Validación: La media de los residuales se encuentra alejada del error/score correspondiente al val_loss de la época del mejor modelo.
Conjunto de Prueba: La media de los residuales se encuentra considerablemente alejada del error/score correspondiente al val_loss de la época del mejor modelo.
Lo anterior es coherente con las métricas obtenidas para el modelo implementado.
Resumen de métricas#
En la tabla a continuación se encuentran registradas las métricas obtenidas para el conjunto de datos de entrenamiento para la variable Price.
# Stack every per-model TRAINING metrics frame (MLP and LSTM, for Price,
# cumulative return, and each volatility window ω ∈ {7, 14, 21, 28} at each
# horizon τ ∈ {7, 14, 21, 28}) into one summary table.
# ignore_index=False keeps each row's descriptive model label as the index.
metricas_df_train = pd.concat([metrica_price_MLP_train, metrica_price_MLP_train14, metrica_price_MLP_train21, metrica_price_MLP_train28,
metrica_price_LSTM_train, metrica_price_LSTM_train14, metrica_price_LSTM_train21, metrica_price_LSTM_train28,
metrica_at_MLP_train, metrica_at_MLP_train14, metrica_at_MLP_train21, metrica_at_MLP_train28,
metrica_at_LSTM_train, metrica_at_LSTM_train14, metrica_at_LSTM_train21, metrica_at_LSTM_train28,
metrica_vola7_MLP_train, metrica_vola7_MLP_train14, metrica_vola7_MLP_train21, metrica_vola7_MLP_train28,
metrica_vola7_LSTM_train, metrica_vola7_LSTM_train14, metrica_vola7_LSTM_train21, metrica_vola7_LSTM_train28,
metrica_vola14_MLP_train, metrica_vola14_MLP_train14, metrica_vola14_MLP_train21, metrica_vola14_MLP_train28,
metrica_vola14_LSTM_train, metrica_vola14_LSTM_train14, metrica_vola14_LSTM_train21, metrica_vola14_LSTM_train28,
metrica_vola21_MLP_train, metrica_vola21_MLP_train14, metrica_vola21_MLP_train21, metrica_vola21_MLP_train28,
metrica_vola21_LSTM_train, metrica_vola21_LSTM_train14, metrica_vola21_LSTM_train21, metrica_vola21_LSTM_train28,
metrica_vola28_MLP_train, metrica_vola28_MLP_train14, metrica_vola28_MLP_train21, metrica_vola28_MLP_train28,
metrica_vola28_LSTM_train, metrica_vola28_LSTM_train14, metrica_vola28_LSTM_train21, metrica_vola28_LSTM_train28], ignore_index=False)
metricas_df_train
| SSE | MAPE | MAD | MSD | R2 | Ljung-Box p-value | Jarque-Bera p-value | |
|---|---|---|---|---|---|---|---|
| MLP Entrenamiento Price τ = 7 | 4.5824e+09 | 9.01% | 416.59 | 9.2183e+05 | 99.62% | 0.0000e+00 | 0.0000e+00 |
| MLP Entrenamiento Price τ = 14 | 2.6896e+09 | 24.8% | 302.87 | 5.4413e+05 | 99.77% | 5.0699e-158 | 0.0000e+00 |
| MLP Entrenamiento Price τ = 21 | 2.8020e+09 | 40.45% | 311.40 | 5.7010e+05 | 99.75% | 1.5950e-221 | 0.0000e+00 |
| MLP Entrenamiento Price τ = 28 | 3.4518e+09 | 8.41% | 348.66 | 7.0632e+05 | 99.69% | 1.4424e-246 | 0.0000e+00 |
| LSTM Entrenamiento Price τ = 7 | 1.7288e+12 | 65.34% | 10422.74 | 3.4777e+08 | -44.87% | 0.0000e+00 | 0.0000e+00 |
| LSTM Entrenamiento Price τ = 14 | 1.6667e+12 | 67.82% | 10252.25 | 3.3718e+08 | -44.73% | 0.0000e+00 | 0.0000e+00 |
| LSTM Entrenamiento Price τ = 21 | 1.6280e+12 | 64.44% | 10128.41 | 3.3124e+08 | -44.31% | 0.0000e+00 | 0.0000e+00 |
| LSTM Entrenamiento Price τ = 28 | 1.5885e+12 | 66.24% | 10000.76 | 3.2505e+08 | -43.89% | 0.0000e+00 | 0.0000e+00 |
| MLP Entrenamiento Retorno Acumulado τ = 7 | 3.1055e+03 | 4.87% | 0.69 | 6.2000e-01 | 98.32% | 0.0000e+00 | 1.4771e-18 |
| MLP Entrenamiento Retorno Acumulado τ = 14 | 9.4103e+01 | 0.79% | 0.08 | 2.0000e-02 | 99.95% | 0.0000e+00 | 0.0000e+00 |
| MLP Entrenamiento Retorno Acumulado τ = 21 | 2.2520e+02 | 1.16% | 0.13 | 5.0000e-02 | 99.87% | 0.0000e+00 | 0.0000e+00 |
| MLP Entrenamiento Retorno Acumulado τ = 28 | 2.8950e+02 | 1.57% | 0.16 | 6.0000e-02 | 99.84% | 0.0000e+00 | 0.0000e+00 |
| LSTM Entrenamiento Retorno Acumulado τ = 7 | 7.3388e+02 | 2.18% | 0.34 | 1.5000e-01 | 99.6% | 0.0000e+00 | 0.0000e+00 |
| LSTM Entrenamiento Retorno Acumulado τ = 14 | 2.2093e+03 | 3.94% | 0.60 | 4.5000e-01 | 98.78% | 0.0000e+00 | 0.0000e+00 |
| LSTM Entrenamiento Retorno Acumulado τ = 21 | 2.5803e+02 | 1.25% | 0.17 | 5.0000e-02 | 99.86% | 0.0000e+00 | 0.0000e+00 |
| LSTM Entrenamiento Retorno Acumulado τ = 28 | 9.3087e+01 | 0.97% | 0.10 | 2.0000e-02 | 99.95% | 0.0000e+00 | 0.0000e+00 |
| MLP Entrenamiento Volatilidad ω = 7 y τ = 7 | 2.5823e+00 | 17.98% | 0.01 | 0.0000e+00 | 87.67% | 4.7510e-08 | 0.0000e+00 |
| MLP Entrenamiento Volatilidad ω = 7 y τ = 14 | 3.7256e+00 | 17.22% | 0.01 | 0.0000e+00 | 82.18% | 5.7162e-215 | 0.0000e+00 |
| MLP Entrenamiento Volatilidad ω = 7 y τ = 21 | 3.8423e+00 | 16.05% | 0.01 | 0.0000e+00 | 81.61% | 2.4903e-268 | 0.0000e+00 |
| MLP Entrenamiento Volatilidad ω = 7 y τ = 28 | 3.5159e+00 | 19.21% | 0.01 | 0.0000e+00 | 83.16% | 1.5062e-172 | 0.0000e+00 |
| LSTM Entrenamiento Volatilidad ω = 7 y τ = 7 | 3.7767e+00 | 25.3% | 0.01 | 0.0000e+00 | 81.96% | 1.0236e-203 | 0.0000e+00 |
| LSTM Entrenamiento Volatilidad ω = 7 y τ = 14 | 2.2577e+01 | 70.72% | 0.03 | 0.0000e+00 | -7.96% | 0.0000e+00 | 0.0000e+00 |
| LSTM Entrenamiento Volatilidad ω = 7 y τ = 21 | 3.1251e+00 | 16.51% | 0.01 | 0.0000e+00 | 85.04% | 4.9510e-24 | 0.0000e+00 |
| LSTM Entrenamiento Volatilidad ω = 7 y τ = 28 | 1.5382e+02 | 722.37% | 0.17 | 3.0000e-02 | -636.93% | 0.0000e+00 | 0.0000e+00 |
| MLP Entrenamiento Volatilidad ω = 14 y τ = 7 | 1.9199e+00 | 16.54% | 0.01 | 0.0000e+00 | 90.16% | 1.1289e-226 | 0.0000e+00 |
| MLP Entrenamiento Volatilidad ω = 14 y τ = 14 | 1.6660e+00 | 7.83% | 0.00 | 0.0000e+00 | 91.45% | 3.0034e-122 | 0.0000e+00 |
| MLP Entrenamiento Volatilidad ω = 14 y τ = 21 | 1.7507e+00 | 8.31% | 0.00 | 0.0000e+00 | 91.0% | 4.5340e-221 | 0.0000e+00 |
| MLP Entrenamiento Volatilidad ω = 14 y τ = 28 | 1.1029e+00 | 10.11% | 0.00 | 0.0000e+00 | 94.33% | 5.5134e-17 | 0.0000e+00 |
| LSTM Entrenamiento Volatilidad ω = 14 y τ = 7 | 1.4121e+00 | 8.87% | 0.00 | 0.0000e+00 | 92.76% | 1.0995e-04 | 0.0000e+00 |
| LSTM Entrenamiento Volatilidad ω = 14 y τ = 14 | 1.4431e+00 | 8.83% | 0.00 | 0.0000e+00 | 92.6% | 3.1792e-12 | 0.0000e+00 |
| LSTM Entrenamiento Volatilidad ω = 14 y τ = 21 | 1.3849e+00 | 7.52% | 0.00 | 0.0000e+00 | 92.88% | 9.4864e-07 | 0.0000e+00 |
| LSTM Entrenamiento Volatilidad ω = 14 y τ = 28 | 1.4339e+00 | 8.84% | 0.00 | 0.0000e+00 | 92.63% | 1.4921e-20 | 0.0000e+00 |
| MLP Entrenamiento Volatilidad ω = 21 y τ = 7 | 1.3412e+00 | 8.34% | 0.00 | 0.0000e+00 | 92.87% | 7.8915e-219 | 0.0000e+00 |
| MLP Entrenamiento Volatilidad ω = 21 y τ = 14 | 1.0729e+00 | 6.34% | 0.00 | 0.0000e+00 | 94.29% | 1.7769e-90 | 0.0000e+00 |
| MLP Entrenamiento Volatilidad ω = 21 y τ = 21 | 1.0415e+00 | 5.61% | 0.00 | 0.0000e+00 | 94.45% | 1.8367e-103 | 0.0000e+00 |
| MLP Entrenamiento Volatilidad ω = 21 y τ = 28 | 7.1784e-01 | 7.63% | 0.00 | 0.0000e+00 | 96.17% | 5.0992e-03 | 0.0000e+00 |
| LSTM Entrenamiento Volatilidad ω = 21 y τ = 7 | 1.2912e+00 | 11.0% | 0.01 | 0.0000e+00 | 93.14% | 1.6833e-156 | 0.0000e+00 |
| LSTM Entrenamiento Volatilidad ω = 21 y τ = 14 | 9.4141e-01 | 6.88% | 0.00 | 0.0000e+00 | 94.99% | 9.4864e-07 | 0.0000e+00 |
| LSTM Entrenamiento Volatilidad ω = 21 y τ = 21 | 8.7630e-01 | 5.2% | 0.00 | 0.0000e+00 | 95.33% | 4.1786e-01 | 0.0000e+00 |
| LSTM Entrenamiento Volatilidad ω = 21 y τ = 28 | 8.8141e-01 | 5.44% | 0.00 | 0.0000e+00 | 95.3% | 4.5168e-07 | 0.0000e+00 |
| MLP Entrenamiento Volatilidad ω = 28 y τ = 7 | 1.0324e+00 | 5.97% | 0.00 | 0.0000e+00 | 94.36% | 2.9829e-271 | 0.0000e+00 |
| MLP Entrenamiento Volatilidad ω = 28 y τ = 14 | 7.7663e-01 | 5.47% | 0.00 | 0.0000e+00 | 95.75% | 3.2173e-67 | 0.0000e+00 |
| MLP Entrenamiento Volatilidad ω = 28 y τ = 21 | 7.5729e-01 | 4.54% | 0.00 | 0.0000e+00 | 95.85% | 2.3543e-78 | 0.0000e+00 |
| MLP Entrenamiento Volatilidad ω = 28 y τ = 28 | 6.6786e-01 | 6.35% | 0.00 | 0.0000e+00 | 96.34% | 7.0124e-100 | 0.0000e+00 |
| LSTM Entrenamiento Volatilidad ω = 28 y τ = 7 | 6.9489e-01 | 5.1% | 0.00 | 0.0000e+00 | 96.2% | 2.0785e-23 | 0.0000e+00 |
| LSTM Entrenamiento Volatilidad ω = 28 y τ = 14 | 6.4152e-01 | 3.75% | 0.00 | 0.0000e+00 | 96.49% | 1.4921e-20 | 0.0000e+00 |
| LSTM Entrenamiento Volatilidad ω = 28 y τ = 21 | 6.4057e-01 | 3.8% | 0.00 | 0.0000e+00 | 96.49% | 7.7741e-01 | 0.0000e+00 |
| LSTM Entrenamiento Volatilidad ω = 28 y τ = 28 | 6.3765e-01 | 4.16% | 0.00 | 0.0000e+00 | 96.5% | 4.2825e-01 | 0.0000e+00 |
El análisis de los modelos de predicción revela un rendimiento variado entre las configuraciones de MLP (Perceptrón Multicapa) y LSTM (Memoria a Largo Plazo) en diferentes métricas. En general, los modelos de MLP exhiben un desempeño superior en las métricas de error (SSE, MAPE, MAD y MSD) en comparación con LSTM, especialmente en el caso del “Entrenamiento Volatilidad” con τ = 28, donde el MLP muestra el menor SSE (6.6786e-01) y el MAPE más bajo (6.35%). Sin embargo, el R² más alto se observa en MLP para “Volatilidad ω = 28 y τ = 28”, alcanzando un 96.5%. En contraste, los LSTM tienden a tener mayores SSE y MAPE, con valores que superan en varias instancias los 1.5e+12 y 65%, respectivamente, lo que indica un ajuste menos eficaz a los datos. Adicionalmente, los valores de p-value de Ljung-Box y Jarque-Bera sugieren que muchos modelos presentan problemas de autocorrelación y no normalidad en los residuos, especialmente en los modelos LSTM. En conclusión, el modelo MLP Entrenamiento Volatilidad ω = 28 y τ = 28 se destaca como el mejor, ofreciendo los errores más bajos y el mejor ajuste a los datos.
Mejor modelo: MLP Entrenamiento Volatilidad ω = 28 y τ = 28.
SSE mínimo: 6.6786e-01.
MAPE más bajo: 6.35%.
R² más alto: 96.5%.
En la tabla a continuación se encuentran registradas las métricas obtenidas para el conjunto de datos de prueba para la variable Retorno Acumulado.
# Stack every per-model TEST metrics frame (MLP and LSTM, for Price,
# cumulative return, and each volatility window ω ∈ {7, 14, 21, 28} at each
# horizon τ ∈ {7, 14, 21, 28}) into one summary table.
# ignore_index=False keeps each row's descriptive model label as the index.
metricas_df_test = pd.concat([metrica_MLP7_test, metrica_MLP14_test, metrica_MLP21_test, metrica_MLP28_test,
metrica_LSTM_test, metrica_LSTM_test14, metrica_LSTM_test21, metrica_LSTM_test28,
metrica_MLP7_test_at, metrica_MLP14_test_at, metrica_MLP21_test_at, metrica_MLP28_test_at,
metrica_LSTM_test_at, metrica_LSTM_test_at14, metrica_LSTM_test21_at, metrica_LSTM_test28_at,
metrica_MLP7_test_vola7, metrica_MLP14_test_vola7, metrica_MLP21_test_vola7, metrica_MLP28_test_vola7,
metrica_LSTM_test_vola7, metrica_LSTM_test14_vola7, metrica_LSTM_test21_vola7, metrica_LSTM_test28_vola7,
metrica_MLP7_test_vola14, metrica_MLP14_test_vola14, metrica_MLP21_test_vola14, metrica_MLP28_test_vola14,
metrica_LSTM_test_vola14, metrica_LSTM_test14_vola14, metrica_LSTM_test21_vola14, metrica_LSTM_test28_vola14,
metrica_MLP7_test_vola21, metrica_MLP14_test_vola21, metrica_MLP21_test_vola21, metrica_MLP28_test_vola21,
metrica_LSTM_test_vola21, metrica_LSTM_test14_vola21, metrica_LSTM_test21_vola21, metrica_LSTM_test28_vola21,
metrica_MLP7_test_vola28, metrica_MLP14_test_vola28, metrica_MLP21_test_vola28, metrica_MLP28_test_vola28,
metrica_LSTM_test_vola28, metrica_LSTM_test14_vola28, metrica_LSTM_test21_vola28, metrica_LSTM_test28_vola28], ignore_index=False)
metricas_df_test
| SSE | MAPE | MAD | MSD | R2 | Ljung-Box p-value | Jarque-Bera p-value | |
|---|---|---|---|---|---|---|---|
| MLP Prueba Price τ = 7 | 9.6041e+07 | 4.17% | 2851.11 | 1.3720e+07 | -125.85% | 3.1399e-01 | 0.6674 |
| MLP Prueba Price τ = 14 | 1.0227e+08 | 3.11% | 1936.37 | 7.3047e+06 | 58.69% | 2.1899e-03 | 0.5157 |
| MLP Prueba Price τ = 21 | 8.6674e+07 | 2.34% | 1314.14 | 4.1273e+06 | 80.51% | 1.8809e-04 | 0.1965 |
| MLP Prueba Price τ = 28 | 5.0452e+07 | 2.29% | 1087.84 | 1.8018e+06 | 88.21% | 5.4064e-05 | 0.3951 |
| LSTM Prueba Price τ = 7 | 3.4305e+10 | 99.7% | 69961.76 | 4.9007e+09 | -80571.1% | 8.9358e-02 | 0.7343 |
| LSTM Prueba Price τ = 14 | 5.6866e+10 | 99.67% | 63593.77 | 4.0619e+09 | -22868.46% | 2.4861e-02 | 0.7014 |
| LSTM Prueba Price τ = 21 | 6.2208e+10 | 99.62% | 54232.21 | 2.9623e+09 | -13890.3% | 7.9639e-03 | 0.8356 |
| LSTM Prueba Price τ = 28 | 6.3986e+10 | 99.57% | 47643.90 | 2.2852e+09 | -14852.49% | 2.7070e-04 | 0.4270 |
| MLP Prueba Retorno Acumulado τ = 7 | 7.3211e-02 | 0.41% | 0.10 | 1.0000e-02 | -775.94% | 1.8205e-01 | 0.7553 |
| MLP Prueba Retorno Acumulado τ = 14 | 7.6389e-02 | 0.28% | 0.07 | 1.0000e-02 | -6.65% | 2.1899e-03 | 0.5157 |
| MLP Prueba Retorno Acumulado τ = 21 | 1.9116e-01 | 0.34% | 0.08 | 1.0000e-02 | -27.19% | 3.0383e-04 | 0.4259 |
| MLP Prueba Retorno Acumulado τ = 28 | 3.7658e-01 | 0.42% | 0.10 | 1.0000e-02 | -89.75% | 1.2996e-04 | 0.7522 |
| LSTM Prueba Retorno Acumulado τ = 7 | 4.2989e-01 | 1.03% | 0.24 | 6.0000e-02 | -5043.45% | 1.0014e-01 | 0.7179 |
| LSTM Prueba Retorno Acumulado τ = 14 | 4.7329e+05 | 777.81% | 183.87 | 3.3807e+04 | -660795871.98% | 5.9897e-02 | 0.7427 |
| LSTM Prueba Retorno Acumulado τ = 21 | 2.1934e+00 | 1.33% | 0.31 | 1.0000e-01 | -1359.33% | 1.6237e-02 | 0.8636 |
| LSTM Prueba Retorno Acumulado τ = 28 | 6.8384e-01 | 0.63% | 0.15 | 2.0000e-02 | -244.58% | 3.6643e-04 | 0.6241 |
| MLP Prueba Volatilidad ω = 7 y τ = 7 | 5.2289e-04 | 28.82% | 0.01 | 0.0000e+00 | -4.83% | 1.9644e-01 | 0.6142 |
| MLP Prueba Volatilidad ω = 7 τ = 14 | 6.8199e-04 | 14.26% | 0.01 | 0.0000e+00 | 18.43% | 7.5350e-03 | 0.5912 |
| MLP Prueba Volatilidad ω = 7 y τ = 21 | 3.7456e-04 | 18.01% | 0.00 | 0.0000e+00 | 85.62% | 1.1583e-03 | 0.4394 |
| MLP Prueba Volatilidad ω = 7 y τ = 28 | 5.0188e-04 | 26.09% | 0.00 | 0.0000e+00 | -6.44% | 2.7413e-05 | 0.6066 |
| LSTM Prueba Volatilidad ω = 7 y τ = 7 | 8.9637e-04 | 40.63% | 0.01 | 0.0000e+00 | -79.71% | 1.0001e-01 | 0.5787 |
| LSTM Prueba Volatilidad ω = 7 y τ = 14 | 1.8060e-02 | 86.89% | 0.04 | 0.0000e+00 | -2059.9% | 6.4681e-04 | 0.5676 |
| LSTM Prueba Volatilidad ω = 7 y τ = 21 | 4.3315e-04 | 16.96% | 0.00 | 0.0000e+00 | 83.37% | 1.3204e-03 | 0.3839 |
| LSTM Prueba Volatilidad ω = 7 y τ = 28 | 7.7546e-01 | 1222.52% | 0.17 | 3.0000e-02 | -164359.56% | 6.6758e-06 | 0.3047 |
| MLP Prueba Volatilidad ω = 14 y τ = 7 | 5.8058e-05 | 5.78% | 0.00 | 0.0000e+00 | 12.02% | 5.9678e-02 | 0.5949 |
| MLP Prueba Volatilidad ω = 14 τ = 14 | 1.5989e-04 | 6.6% | 0.00 | 0.0000e+00 | 77.85% | 1.4822e-02 | 0.0687 |
| MLP Prueba Volatilidad ω = 14 y τ = 21 | 1.0870e-04 | 5.67% | 0.00 | 0.0000e+00 | 86.73% | 3.6443e-03 | 0.3858 |
| MLP Prueba Volatilidad ω = 14 y τ = 28 | 4.2222e-04 | 18.21% | 0.00 | 0.0000e+00 | -135.94% | 3.1530e-05 | 0.1321 |
| LSTM Prueba Volatilidad ω = 14 y τ = 7 | 6.3947e-05 | 5.38% | 0.00 | 0.0000e+00 | 3.1% | 6.0408e-02 | 0.6416 |
| LSTM Prueba Volatilidad ω = 14 y τ = 14 | 2.0540e-04 | 8.62% | 0.00 | 0.0000e+00 | 71.54% | 4.0772e-02 | 0.4244 |
| LSTM Prueba Volatilidad ω = 14 y τ = 21 | 1.0018e-04 | 4.92% | 0.00 | 0.0000e+00 | 87.77% | 1.3204e-03 | 0.3839 |
| LSTM Prueba Volatilidad ω = 14 y τ = 28 | 8.0498e-01 | 958.13% | 0.17 | 3.0000e-02 | -449719.48% | 4.6261e-06 | 0.5100 |
| MLP Prueba Volatilidad ω = 21 y τ = 7 | 1.9325e-05 | 3.08% | 0.00 | 0.0000e+00 | 41.9% | 9.7080e-01 | 0.5649 |
| MLP Prueba Volatilidad ω = 21 τ = 14 | 9.4877e-05 | 6.48% | 0.00 | 0.0000e+00 | 77.73% | 4.2840e-02 | 0.4961 |
| MLP Prueba Volatilidad ω = 21 y τ = 21 | 6.8749e-05 | 5.6% | 0.00 | 0.0000e+00 | 84.5% | 8.6884e-03 | 0.7920 |
| MLP Prueba Volatilidad ω = 21 y τ = 28 | 1.3454e-04 | 10.46% | 0.00 | 0.0000e+00 | 57.38% | 3.2944e-06 | 0.2533 |
| LSTM Prueba Volatilidad ω = 21 y τ = 7 | 2.0359e-05 | 3.2% | 0.00 | 0.0000e+00 | 38.79% | 9.7817e-01 | 0.5669 |
| LSTM Prueba Volatilidad ω = 21 y τ = 14 | 1.2001e-04 | 7.81% | 0.00 | 0.0000e+00 | 71.83% | 4.0074e-02 | 0.5070 |
| LSTM Prueba Volatilidad ω = 21 y τ = 21 | 7.1796e-05 | 5.49% | 0.00 | 0.0000e+00 | 83.82% | 7.8782e-03 | 0.6907 |
| LSTM Prueba Volatilidad ω = 21 y τ = 28 | 7.6550e-05 | 5.21% | 0.00 | 0.0000e+00 | 75.75% | 4.2251e-06 | 0.2692 |
| MLP Prueba Volatilidad ω = 28 y τ = 7 | 1.0452e-05 | 2.75% | 0.00 | 0.0000e+00 | 49.69% | 2.1090e-01 | 0.4604 |
| MLP Prueba Volatilidad ω = 28 τ = 14 | 8.3423e-05 | 7.97% | 0.00 | 0.0000e+00 | 73.12% | 3.5600e-02 | 0.7552 |
| MLP Prueba Volatilidad ω = 28 y τ = 21 | 1.2227e-04 | 9.38% | 0.00 | 0.0000e+00 | 43.97% | 1.2075e-02 | 0.1292 |
| MLP Prueba Volatilidad ω = 28 y τ = 28 | 6.9863e-05 | 5.79% | 0.00 | 0.0000e+00 | 85.32% | 1.6950e-06 | 0.7195 |
| LSTM Prueba Volatilidad ω = 28 y τ = 7 | 8.2883e-06 | 2.37% | 0.00 | 0.0000e+00 | 60.11% | 2.1066e-01 | 0.5019 |
| LSTM Prueba Volatilidad ω = 28 y τ = 14 | 5.8923e-05 | 5.01% | 0.00 | 0.0000e+00 | 81.02% | 2.2063e-02 | 0.8832 |
| LSTM Prueba Volatilidad ω = 28 y τ = 21 | 5.3424e-05 | 4.83% | 0.00 | 0.0000e+00 | 75.52% | 5.4375e-05 | 0.5799 |
| LSTM Prueba Volatilidad ω = 28 y τ = 28 | 5.0535e-05 | 4.11% | 0.00 | 0.0000e+00 | 89.38% | 1.7241e-06 | 0.7083 |
El análisis de los resultados obtenidos de los modelos MLP y LSTM en los datos de prueba revela diferencias significativas en su desempeño según las métricas evaluadas. Para el caso de los precios, los modelos MLP muestran un mejor desempeño que los LSTM en términos de errores como SSE (Suma de Errores Cuadrados) y MAPE (Error Porcentual Absoluto Medio). Por ejemplo, el MLP para τ = 28 tiene un SSE de 5.0452e+07 y un MAPE de 2.29%, lo que indica una buena capacidad de predicción, mientras que los LSTM presentan valores de SSE mucho más altos, superando los 3.4e+10 y con un MAPE que llega hasta el 99.7%.
En cuanto a los retornos acumulados, los modelos MLP también sobresalen, destacando el MLP para τ = 14 con un MAPE de solo 0.28%, mientras que el LSTM para τ = 14 muestra un MAPE desmesurado de 777.81%. En el análisis de volatilidad, los MLP continúan demostrando una mayor precisión, con menores MAPE en comparación con los LSTM. En resumen, los modelos MLP tienen un rendimiento superior en las predicciones de precios, retornos acumulados y volatilidad en los datos de prueba, en comparación con los modelos LSTM, lo que sugiere que el enfoque MLP es más adecuado para estos conjuntos de datos en particular. MLP muestra un mejor desempeño general en SSE y MAPE en comparación con LSTM.
El modelo MLP para τ = 28 tiene el mejor desempeño en precios.
Para retornos acumulados, el MLP para τ = 14 es el más efectivo.
En análisis de volatilidad, los MLP también superan a los LSTM consistentemente.
El MLP es el modelo con mejor comportamiento para este análisis.